xref: /linux/include/linux/sched.h (revision 005438a8eef063495ac059d128eea71b58de50e5)
1 #ifndef _LINUX_SCHED_H
2 #define _LINUX_SCHED_H
3 
4 #include <uapi/linux/sched.h>
5 
6 #include <linux/sched/prio.h>
7 
8 
9 struct sched_param {
10 	int sched_priority;
11 };
12 
13 #include <asm/param.h>	/* for HZ */
14 
15 #include <linux/capability.h>
16 #include <linux/threads.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/timex.h>
20 #include <linux/jiffies.h>
21 #include <linux/plist.h>
22 #include <linux/rbtree.h>
23 #include <linux/thread_info.h>
24 #include <linux/cpumask.h>
25 #include <linux/errno.h>
26 #include <linux/nodemask.h>
27 #include <linux/mm_types.h>
28 #include <linux/preempt.h>
29 
30 #include <asm/page.h>
31 #include <asm/ptrace.h>
32 #include <linux/cputime.h>
33 
34 #include <linux/smp.h>
35 #include <linux/sem.h>
36 #include <linux/shm.h>
37 #include <linux/signal.h>
38 #include <linux/compiler.h>
39 #include <linux/completion.h>
40 #include <linux/pid.h>
41 #include <linux/percpu.h>
42 #include <linux/topology.h>
43 #include <linux/proportions.h>
44 #include <linux/seccomp.h>
45 #include <linux/rcupdate.h>
46 #include <linux/rculist.h>
47 #include <linux/rtmutex.h>
48 
49 #include <linux/time.h>
50 #include <linux/param.h>
51 #include <linux/resource.h>
52 #include <linux/timer.h>
53 #include <linux/hrtimer.h>
54 #include <linux/task_io_accounting.h>
55 #include <linux/latencytop.h>
56 #include <linux/cred.h>
57 #include <linux/llist.h>
58 #include <linux/uidgid.h>
59 #include <linux/gfp.h>
60 #include <linux/magic.h>
61 #include <linux/cgroup-defs.h>
62 
63 #include <asm/processor.h>
64 
65 #define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */
66 
67 /*
68  * Extended scheduling parameters data structure.
69  *
70  * This is needed because the original struct sched_param can not be
71  * altered without introducing ABI issues with legacy applications
72  * (e.g., in sched_getparam()).
73  *
74  * However, the possibility of specifying more than just a priority for
75  * the tasks may be useful for a wide variety of application fields, e.g.,
76  * multimedia, streaming, automation and control, and many others.
77  *
78  * This variant (sched_attr) is meant at describing a so-called
79  * sporadic time-constrained task. In such model a task is specified by:
80  *  - the activation period or minimum instance inter-arrival time;
81  *  - the maximum (or average, depending on the actual scheduling
82  *    discipline) computation time of all instances, a.k.a. runtime;
83  *  - the deadline (relative to the actual activation time) of each
84  *    instance.
85  * Very briefly, a periodic (sporadic) task asks for the execution of
86  * some specific computation --which is typically called an instance--
87  * (at most) every period. Moreover, each instance typically lasts no more
88  * than the runtime and must be completed by time instant t equal to
89  * the instance activation time + the deadline.
90  *
91  * This is reflected by the actual fields of the sched_attr structure:
92  *
93  *  @size		size of the structure, for fwd/bwd compat.
94  *
95  *  @sched_policy	task's scheduling policy
96  *  @sched_flags	for customizing the scheduler behaviour
97  *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
98  *  @sched_priority	task's static priority (SCHED_FIFO/RR)
99  *  @sched_deadline	representative of the task's deadline
100  *  @sched_runtime	representative of the task's runtime
101  *  @sched_period	representative of the task's period
102  *
103  * Given this task model, there are a multiplicity of scheduling algorithms
104  * and policies, that can be used to ensure all the tasks will make their
105  * timing constraints.
106  *
107  * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
108  * only user of this new interface. More information about the algorithm
109  * available in the scheduling class file or in Documentation/.
110  */
111 struct sched_attr {
112 	u32 size;
113 
114 	u32 sched_policy;
115 	u64 sched_flags;
116 
117 	/* SCHED_NORMAL, SCHED_BATCH */
118 	s32 sched_nice;
119 
120 	/* SCHED_FIFO, SCHED_RR */
121 	u32 sched_priority;
122 
123 	/* SCHED_DEADLINE */
124 	u64 sched_runtime;
125 	u64 sched_deadline;
126 	u64 sched_period;
127 };
128 
129 struct futex_pi_state;
130 struct robust_list_head;
131 struct bio_list;
132 struct fs_struct;
133 struct perf_event_context;
134 struct blk_plug;
135 struct filename;
136 struct nameidata;
137 
138 #define VMACACHE_BITS 2
139 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
140 #define VMACACHE_MASK (VMACACHE_SIZE - 1)
141 
142 /*
143  * These are the constant used to fake the fixed-point load-average
144  * counting. Some notes:
145  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
146  *    a load-average precision of 10 bits integer + 11 bits fractional
147  *  - if you want to count load-averages more often, you need more
148  *    precision, or rounding will get you. With 2-second counting freq,
149  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
150  *    11 bit fractions.
151  */
152 extern unsigned long avenrun[];		/* Load averages */
153 extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
154 
155 #define FSHIFT		11		/* nr of bits of precision */
156 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
157 #define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
158 #define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
159 #define EXP_5		2014		/* 1/exp(5sec/5min) */
160 #define EXP_15		2037		/* 1/exp(5sec/15min) */
161 
162 #define CALC_LOAD(load,exp,n) \
163 	load *= exp; \
164 	load += n*(FIXED_1-exp); \
165 	load >>= FSHIFT;
166 
167 extern unsigned long total_forks;
168 extern int nr_threads;
169 DECLARE_PER_CPU(unsigned long, process_counts);
170 extern int nr_processes(void);
171 extern unsigned long nr_running(void);
172 extern bool single_task_running(void);
173 extern unsigned long nr_iowait(void);
174 extern unsigned long nr_iowait_cpu(int cpu);
175 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
176 
177 extern void calc_global_load(unsigned long ticks);
178 
179 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
180 extern void update_cpu_load_nohz(void);
181 #else
182 static inline void update_cpu_load_nohz(void) { }
183 #endif
184 
185 extern unsigned long get_parent_ip(unsigned long addr);
186 
187 extern void dump_cpu_task(int cpu);
188 
189 struct seq_file;
190 struct cfs_rq;
191 struct task_group;
192 #ifdef CONFIG_SCHED_DEBUG
193 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
194 extern void proc_sched_set_task(struct task_struct *p);
195 #endif
196 
197 /*
198  * Task state bitmask. NOTE! These bits are also
199  * encoded in fs/proc/array.c: get_task_state().
200  *
201  * We have two separate sets of flags: task->state
202  * is about runnability, while task->exit_state are
203  * about the task exiting. Confusing, but this way
204  * modifying one set can't modify the other one by
205  * mistake.
206  */
207 #define TASK_RUNNING		0
208 #define TASK_INTERRUPTIBLE	1
209 #define TASK_UNINTERRUPTIBLE	2
210 #define __TASK_STOPPED		4
211 #define __TASK_TRACED		8
212 /* in tsk->exit_state */
213 #define EXIT_DEAD		16
214 #define EXIT_ZOMBIE		32
215 #define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
216 /* in tsk->state again */
217 #define TASK_DEAD		64
218 #define TASK_WAKEKILL		128
219 #define TASK_WAKING		256
220 #define TASK_PARKED		512
221 #define TASK_NOLOAD		1024
222 #define TASK_STATE_MAX		2048
223 
224 #define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
225 
226 extern char ___assert_task_state[1 - 2*!!(
227 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
228 
229 /* Convenience macros for the sake of set_task_state */
230 #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
231 #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
232 #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
233 
234 #define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
235 
236 /* Convenience macros for the sake of wake_up */
237 #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
238 #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
239 
240 /* get_task_state() */
241 #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
242 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
243 				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
244 
245 #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
246 #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
247 #define task_is_stopped_or_traced(task)	\
248 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
249 #define task_contributes_to_load(task)	\
250 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
251 				 (task->flags & PF_FROZEN) == 0 && \
252 				 (task->state & TASK_NOLOAD) == 0)
253 
254 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
255 
256 #define __set_task_state(tsk, state_value)			\
257 	do {							\
258 		(tsk)->task_state_change = _THIS_IP_;		\
259 		(tsk)->state = (state_value);			\
260 	} while (0)
261 #define set_task_state(tsk, state_value)			\
262 	do {							\
263 		(tsk)->task_state_change = _THIS_IP_;		\
264 		smp_store_mb((tsk)->state, (state_value));		\
265 	} while (0)
266 
267 /*
268  * set_current_state() includes a barrier so that the write of current->state
269  * is correctly serialised wrt the caller's subsequent test of whether to
270  * actually sleep:
271  *
272  *	set_current_state(TASK_UNINTERRUPTIBLE);
273  *	if (do_i_need_to_sleep())
274  *		schedule();
275  *
276  * If the caller does not need such serialisation then use __set_current_state()
277  */
278 #define __set_current_state(state_value)			\
279 	do {							\
280 		current->task_state_change = _THIS_IP_;		\
281 		current->state = (state_value);			\
282 	} while (0)
283 #define set_current_state(state_value)				\
284 	do {							\
285 		current->task_state_change = _THIS_IP_;		\
286 		smp_store_mb(current->state, (state_value));		\
287 	} while (0)
288 
289 #else
290 
291 #define __set_task_state(tsk, state_value)		\
292 	do { (tsk)->state = (state_value); } while (0)
293 #define set_task_state(tsk, state_value)		\
294 	smp_store_mb((tsk)->state, (state_value))
295 
296 /*
297  * set_current_state() includes a barrier so that the write of current->state
298  * is correctly serialised wrt the caller's subsequent test of whether to
299  * actually sleep:
300  *
301  *	set_current_state(TASK_UNINTERRUPTIBLE);
302  *	if (do_i_need_to_sleep())
303  *		schedule();
304  *
305  * If the caller does not need such serialisation then use __set_current_state()
306  */
307 #define __set_current_state(state_value)		\
308 	do { current->state = (state_value); } while (0)
309 #define set_current_state(state_value)			\
310 	smp_store_mb(current->state, (state_value))
311 
312 #endif
313 
314 /* Task command name length */
315 #define TASK_COMM_LEN 16
316 
317 #include <linux/spinlock.h>
318 
319 /*
320  * This serializes "schedule()" and also protects
321  * the run-queue from deletions/modifications (but
322  * _adding_ to the beginning of the run-queue has
323  * a separate lock).
324  */
325 extern rwlock_t tasklist_lock;
326 extern spinlock_t mmlist_lock;
327 
328 struct task_struct;
329 
330 #ifdef CONFIG_PROVE_RCU
331 extern int lockdep_tasklist_lock_is_held(void);
332 #endif /* #ifdef CONFIG_PROVE_RCU */
333 
334 extern void sched_init(void);
335 extern void sched_init_smp(void);
336 extern asmlinkage void schedule_tail(struct task_struct *prev);
337 extern void init_idle(struct task_struct *idle, int cpu);
338 extern void init_idle_bootup_task(struct task_struct *idle);
339 
340 extern cpumask_var_t cpu_isolated_map;
341 
342 extern int runqueue_is_locked(int cpu);
343 
344 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
345 extern void nohz_balance_enter_idle(int cpu);
346 extern void set_cpu_sd_state_idle(void);
347 extern int get_nohz_timer_target(void);
348 #else
349 static inline void nohz_balance_enter_idle(int cpu) { }
350 static inline void set_cpu_sd_state_idle(void) { }
351 #endif
352 
353 /*
354  * Only dump TASK_* tasks. (0 for all tasks)
355  */
356 extern void show_state_filter(unsigned long state_filter);
357 
358 static inline void show_state(void)
359 {
360 	show_state_filter(0);
361 }
362 
363 extern void show_regs(struct pt_regs *);
364 
365 /*
366  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
367  * task), SP is the stack pointer of the first frame that should be shown in the back
368  * trace (or NULL if the entire call-chain of the task should be shown).
369  */
370 extern void show_stack(struct task_struct *task, unsigned long *sp);
371 
372 extern void cpu_init (void);
373 extern void trap_init(void);
374 extern void update_process_times(int user);
375 extern void scheduler_tick(void);
376 
377 extern void sched_show_task(struct task_struct *p);
378 
379 #ifdef CONFIG_LOCKUP_DETECTOR
380 extern void touch_softlockup_watchdog(void);
381 extern void touch_softlockup_watchdog_sync(void);
382 extern void touch_all_softlockup_watchdogs(void);
383 extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
384 				  void __user *buffer,
385 				  size_t *lenp, loff_t *ppos);
386 extern unsigned int  softlockup_panic;
387 void lockup_detector_init(void);
388 #else
389 static inline void touch_softlockup_watchdog(void)
390 {
391 }
392 static inline void touch_softlockup_watchdog_sync(void)
393 {
394 }
395 static inline void touch_all_softlockup_watchdogs(void)
396 {
397 }
398 static inline void lockup_detector_init(void)
399 {
400 }
401 #endif
402 
403 #ifdef CONFIG_DETECT_HUNG_TASK
404 void reset_hung_task_detector(void);
405 #else
406 static inline void reset_hung_task_detector(void)
407 {
408 }
409 #endif
410 
411 /* Attach to any functions which should be ignored in wchan output. */
412 #define __sched		__attribute__((__section__(".sched.text")))
413 
414 /* Linker adds these: start and end of __sched functions */
415 extern char __sched_text_start[], __sched_text_end[];
416 
417 /* Is this address in the __sched functions? */
418 extern int in_sched_functions(unsigned long addr);
419 
420 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
421 extern signed long schedule_timeout(signed long timeout);
422 extern signed long schedule_timeout_interruptible(signed long timeout);
423 extern signed long schedule_timeout_killable(signed long timeout);
424 extern signed long schedule_timeout_uninterruptible(signed long timeout);
425 asmlinkage void schedule(void);
426 extern void schedule_preempt_disabled(void);
427 
428 extern long io_schedule_timeout(long timeout);
429 
430 static inline void io_schedule(void)
431 {
432 	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
433 }
434 
435 struct nsproxy;
436 struct user_namespace;
437 
438 #ifdef CONFIG_MMU
439 extern void arch_pick_mmap_layout(struct mm_struct *mm);
440 extern unsigned long
441 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
442 		       unsigned long, unsigned long);
443 extern unsigned long
444 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
445 			  unsigned long len, unsigned long pgoff,
446 			  unsigned long flags);
447 #else
448 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
449 #endif
450 
451 #define SUID_DUMP_DISABLE	0	/* No setuid dumping */
452 #define SUID_DUMP_USER		1	/* Dump as user of process */
453 #define SUID_DUMP_ROOT		2	/* Dump as root */
454 
455 /* mm flags */
456 
457 /* for SUID_DUMP_* above */
458 #define MMF_DUMPABLE_BITS 2
459 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
460 
461 extern void set_dumpable(struct mm_struct *mm, int value);
462 /*
463  * This returns the actual value of the suid_dumpable flag. For things
464  * that are using this for checking for privilege transitions, it must
465  * test against SUID_DUMP_USER rather than treating it as a boolean
466  * value.
467  */
468 static inline int __get_dumpable(unsigned long mm_flags)
469 {
470 	return mm_flags & MMF_DUMPABLE_MASK;
471 }
472 
473 static inline int get_dumpable(struct mm_struct *mm)
474 {
475 	return __get_dumpable(mm->flags);
476 }
477 
478 /* coredump filter bits */
479 #define MMF_DUMP_ANON_PRIVATE	2
480 #define MMF_DUMP_ANON_SHARED	3
481 #define MMF_DUMP_MAPPED_PRIVATE	4
482 #define MMF_DUMP_MAPPED_SHARED	5
483 #define MMF_DUMP_ELF_HEADERS	6
484 #define MMF_DUMP_HUGETLB_PRIVATE 7
485 #define MMF_DUMP_HUGETLB_SHARED  8
486 
487 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
488 #define MMF_DUMP_FILTER_BITS	7
489 #define MMF_DUMP_FILTER_MASK \
490 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
491 #define MMF_DUMP_FILTER_DEFAULT \
492 	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
493 	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
494 
495 #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
496 # define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
497 #else
498 # define MMF_DUMP_MASK_DEFAULT_ELF	0
499 #endif
500 					/* leave room for more dump flags */
501 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
502 #define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
503 #define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */
504 
505 #define MMF_HAS_UPROBES		19	/* has uprobes */
506 #define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
507 
508 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
509 
510 struct sighand_struct {
511 	atomic_t		count;
512 	struct k_sigaction	action[_NSIG];
513 	spinlock_t		siglock;
514 	wait_queue_head_t	signalfd_wqh;
515 };
516 
517 struct pacct_struct {
518 	int			ac_flag;
519 	long			ac_exitcode;
520 	unsigned long		ac_mem;
521 	cputime_t		ac_utime, ac_stime;
522 	unsigned long		ac_minflt, ac_majflt;
523 };
524 
525 struct cpu_itimer {
526 	cputime_t expires;
527 	cputime_t incr;
528 	u32 error;
529 	u32 incr_error;
530 };
531 
532 /**
533  * struct cputime - snaphsot of system and user cputime
534  * @utime: time spent in user mode
535  * @stime: time spent in system mode
536  *
537  * Gathers a generic snapshot of user and system time.
538  */
539 struct cputime {
540 	cputime_t utime;
541 	cputime_t stime;
542 };
543 
544 /**
545  * struct task_cputime - collected CPU time counts
546  * @utime:		time spent in user mode, in &cputime_t units
547  * @stime:		time spent in kernel mode, in &cputime_t units
548  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
549  *
550  * This is an extension of struct cputime that includes the total runtime
551  * spent by the task from the scheduler point of view.
552  *
553  * As a result, this structure groups together three kinds of CPU time
554  * that are tracked for threads and thread groups.  Most things considering
555  * CPU time want to group these counts together and treat all three
556  * of them in parallel.
557  */
558 struct task_cputime {
559 	cputime_t utime;
560 	cputime_t stime;
561 	unsigned long long sum_exec_runtime;
562 };
563 /* Alternate field names when used to cache expirations. */
564 #define prof_exp	stime
565 #define virt_exp	utime
566 #define sched_exp	sum_exec_runtime
567 
568 #define INIT_CPUTIME	\
569 	(struct task_cputime) {					\
570 		.utime = 0,					\
571 		.stime = 0,					\
572 		.sum_exec_runtime = 0,				\
573 	}
574 
575 /*
576  * This is the atomic variant of task_cputime, which can be used for
577  * storing and updating task_cputime statistics without locking.
578  */
579 struct task_cputime_atomic {
580 	atomic64_t utime;
581 	atomic64_t stime;
582 	atomic64_t sum_exec_runtime;
583 };
584 
585 #define INIT_CPUTIME_ATOMIC \
586 	(struct task_cputime_atomic) {				\
587 		.utime = ATOMIC64_INIT(0),			\
588 		.stime = ATOMIC64_INIT(0),			\
589 		.sum_exec_runtime = ATOMIC64_INIT(0),		\
590 	}
591 
592 #ifdef CONFIG_PREEMPT_COUNT
593 #define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
594 #else
595 #define PREEMPT_DISABLED	PREEMPT_ENABLED
596 #endif
597 
598 /*
599  * Disable preemption until the scheduler is running.
600  * Reset by start_kernel()->sched_init()->init_idle().
601  *
602  * We include PREEMPT_ACTIVE to avoid cond_resched() from working
603  * before the scheduler is active -- see should_resched().
604  */
605 #define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)
606 
607 /**
608  * struct thread_group_cputimer - thread group interval timer counts
609  * @cputime_atomic:	atomic thread group interval timers.
610  * @running:		non-zero when there are timers running and
611  * 			@cputime receives updates.
612  *
613  * This structure contains the version of task_cputime, above, that is
614  * used for thread group CPU timer calculations.
615  */
616 struct thread_group_cputimer {
617 	struct task_cputime_atomic cputime_atomic;
618 	int running;
619 };
620 
621 #include <linux/rwsem.h>
622 struct autogroup;
623 
624 /*
625  * NOTE! "signal_struct" does not have its own
626  * locking, because a shared signal_struct always
627  * implies a shared sighand_struct, so locking
628  * sighand_struct is always a proper superset of
629  * the locking of signal_struct.
630  */
631 struct signal_struct {
632 	atomic_t		sigcnt;
633 	atomic_t		live;
634 	int			nr_threads;
635 	struct list_head	thread_head;
636 
637 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
638 
639 	/* current thread group signal load-balancing target: */
640 	struct task_struct	*curr_target;
641 
642 	/* shared signal handling: */
643 	struct sigpending	shared_pending;
644 
645 	/* thread group exit support */
646 	int			group_exit_code;
647 	/* overloaded:
648 	 * - notify group_exit_task when ->count is equal to notify_count
649 	 * - everyone except group_exit_task is stopped during signal delivery
650 	 *   of fatal signals, group_exit_task processes the signal.
651 	 */
652 	int			notify_count;
653 	struct task_struct	*group_exit_task;
654 
655 	/* thread group stop support, overloads group_exit_code too */
656 	int			group_stop_count;
657 	unsigned int		flags; /* see SIGNAL_* flags below */
658 
659 	/*
660 	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
661 	 * manager, to re-parent orphan (double-forking) child processes
662 	 * to this process instead of 'init'. The service manager is
663 	 * able to receive SIGCHLD signals and is able to investigate
664 	 * the process until it calls wait(). All children of this
665 	 * process will inherit a flag if they should look for a
666 	 * child_subreaper process at exit.
667 	 */
668 	unsigned int		is_child_subreaper:1;
669 	unsigned int		has_child_subreaper:1;
670 
671 	/* POSIX.1b Interval Timers */
672 	int			posix_timer_id;
673 	struct list_head	posix_timers;
674 
675 	/* ITIMER_REAL timer for the process */
676 	struct hrtimer real_timer;
677 	struct pid *leader_pid;
678 	ktime_t it_real_incr;
679 
680 	/*
681 	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
682 	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
683 	 * values are defined to 0 and 1 respectively
684 	 */
685 	struct cpu_itimer it[2];
686 
687 	/*
688 	 * Thread group totals for process CPU timers.
689 	 * See thread_group_cputimer(), et al, for details.
690 	 */
691 	struct thread_group_cputimer cputimer;
692 
693 	/* Earliest-expiration cache. */
694 	struct task_cputime cputime_expires;
695 
696 	struct list_head cpu_timers[3];
697 
698 	struct pid *tty_old_pgrp;
699 
700 	/* boolean value for session group leader */
701 	int leader;
702 
703 	struct tty_struct *tty; /* NULL if no tty */
704 
705 #ifdef CONFIG_SCHED_AUTOGROUP
706 	struct autogroup *autogroup;
707 #endif
708 	/*
709 	 * Cumulative resource counters for dead threads in the group,
710 	 * and for reaped dead child processes forked by this group.
711 	 * Live threads maintain their own counters and add to these
712 	 * in __exit_signal, except for the group leader.
713 	 */
714 	seqlock_t stats_lock;
715 	cputime_t utime, stime, cutime, cstime;
716 	cputime_t gtime;
717 	cputime_t cgtime;
718 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
719 	struct cputime prev_cputime;
720 #endif
721 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
722 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
723 	unsigned long inblock, oublock, cinblock, coublock;
724 	unsigned long maxrss, cmaxrss;
725 	struct task_io_accounting ioac;
726 
727 	/*
728 	 * Cumulative ns of schedule CPU time fo dead threads in the
729 	 * group, not including a zombie group leader, (This only differs
730 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
731 	 * other than jiffies.)
732 	 */
733 	unsigned long long sum_sched_runtime;
734 
735 	/*
736 	 * We don't bother to synchronize most readers of this at all,
737 	 * because there is no reader checking a limit that actually needs
738 	 * to get both rlim_cur and rlim_max atomically, and either one
739 	 * alone is a single word that can safely be read normally.
740 	 * getrlimit/setrlimit use task_lock(current->group_leader) to
741 	 * protect this instead of the siglock, because they really
742 	 * have no need to disable irqs.
743 	 */
744 	struct rlimit rlim[RLIM_NLIMITS];
745 
746 #ifdef CONFIG_BSD_PROCESS_ACCT
747 	struct pacct_struct pacct;	/* per-process accounting information */
748 #endif
749 #ifdef CONFIG_TASKSTATS
750 	struct taskstats *stats;
751 #endif
752 #ifdef CONFIG_AUDIT
753 	unsigned audit_tty;
754 	unsigned audit_tty_log_passwd;
755 	struct tty_audit_buf *tty_audit_buf;
756 #endif
757 
758 	oom_flags_t oom_flags;
759 	short oom_score_adj;		/* OOM kill score adjustment */
760 	short oom_score_adj_min;	/* OOM kill score adjustment min value.
761 					 * Only settable by CAP_SYS_RESOURCE. */
762 
763 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
764 					 * credential calculations
765 					 * (notably. ptrace) */
766 };
767 
768 /*
769  * Bits in flags field of signal_struct.
770  */
771 #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
772 #define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
773 #define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
774 #define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
775 /*
776  * Pending notifications to parent.
777  */
778 #define SIGNAL_CLD_STOPPED	0x00000010
779 #define SIGNAL_CLD_CONTINUED	0x00000020
780 #define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
781 
782 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
783 
784 /* If true, all threads except ->group_exit_task have pending SIGKILL */
785 static inline int signal_group_exit(const struct signal_struct *sig)
786 {
787 	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
788 		(sig->group_exit_task != NULL);
789 }
790 
791 /*
792  * Some day this will be a full-fledged user tracking system..
793  */
794 struct user_struct {
795 	atomic_t __count;	/* reference count */
796 	atomic_t processes;	/* How many processes does this user have? */
797 	atomic_t sigpending;	/* How many pending signals does this user have? */
798 #ifdef CONFIG_INOTIFY_USER
799 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
800 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
801 #endif
802 #ifdef CONFIG_FANOTIFY
803 	atomic_t fanotify_listeners;
804 #endif
805 #ifdef CONFIG_EPOLL
806 	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
807 #endif
808 #ifdef CONFIG_POSIX_MQUEUE
809 	/* protected by mq_lock	*/
810 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
811 #endif
812 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
813 
814 #ifdef CONFIG_KEYS
815 	struct key *uid_keyring;	/* UID specific keyring */
816 	struct key *session_keyring;	/* UID's default session keyring */
817 #endif
818 
819 	/* Hash table maintenance information */
820 	struct hlist_node uidhash_node;
821 	kuid_t uid;
822 
823 #ifdef CONFIG_PERF_EVENTS
824 	atomic_long_t locked_vm;
825 #endif
826 };
827 
828 extern int uids_sysfs_init(void);
829 
830 extern struct user_struct *find_user(kuid_t);
831 
832 extern struct user_struct root_user;
833 #define INIT_USER (&root_user)
834 
835 
836 struct backing_dev_info;
837 struct reclaim_state;
838 
839 #ifdef CONFIG_SCHED_INFO
840 struct sched_info {
841 	/* cumulative counters */
842 	unsigned long pcount;	      /* # of times run on this cpu */
843 	unsigned long long run_delay; /* time spent waiting on a runqueue */
844 
845 	/* timestamps */
846 	unsigned long long last_arrival,/* when we last ran on a cpu */
847 			   last_queued;	/* when we were last queued to run */
848 };
849 #endif /* CONFIG_SCHED_INFO */
850 
851 #ifdef CONFIG_TASK_DELAY_ACCT
852 struct task_delay_info {
853 	spinlock_t	lock;
854 	unsigned int	flags;	/* Private per-task flags */
855 
856 	/* For each stat XXX, add following, aligned appropriately
857 	 *
858 	 * struct timespec XXX_start, XXX_end;
859 	 * u64 XXX_delay;
860 	 * u32 XXX_count;
861 	 *
862 	 * Atomicity of updates to XXX_delay, XXX_count protected by
863 	 * single lock above (split into XXX_lock if contention is an issue).
864 	 */
865 
866 	/*
867 	 * XXX_count is incremented on every XXX operation, the delay
868 	 * associated with the operation is added to XXX_delay.
869 	 * XXX_delay contains the accumulated delay time in nanoseconds.
870 	 */
871 	u64 blkio_start;	/* Shared by blkio, swapin */
872 	u64 blkio_delay;	/* wait for sync block io completion */
873 	u64 swapin_delay;	/* wait for swapin block io completion */
874 	u32 blkio_count;	/* total count of the number of sync block */
875 				/* io operations performed */
876 	u32 swapin_count;	/* total count of the number of swapin block */
877 				/* io operations performed */
878 
879 	u64 freepages_start;
880 	u64 freepages_delay;	/* wait for memory reclaim */
881 	u32 freepages_count;	/* total count of memory reclaim */
882 };
883 #endif	/* CONFIG_TASK_DELAY_ACCT */
884 
885 static inline int sched_info_on(void)
886 {
887 #ifdef CONFIG_SCHEDSTATS
888 	return 1;
889 #elif defined(CONFIG_TASK_DELAY_ACCT)
890 	extern int delayacct_on;
891 	return delayacct_on;
892 #else
893 	return 0;
894 #endif
895 }
896 
897 enum cpu_idle_type {
898 	CPU_IDLE,
899 	CPU_NOT_IDLE,
900 	CPU_NEWLY_IDLE,
901 	CPU_MAX_IDLE_TYPES
902 };
903 
904 /*
905  * Increase resolution of cpu_capacity calculations
906  */
907 #define SCHED_CAPACITY_SHIFT	10
908 #define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)
909 
910 /*
911  * Wake-queues are lists of tasks with a pending wakeup, whose
912  * callers have already marked the task as woken internally,
913  * and can thus carry on. A common use case is being able to
914  * do the wakeups once the corresponding user lock as been
915  * released.
916  *
917  * We hold reference to each task in the list across the wakeup,
918  * thus guaranteeing that the memory is still valid by the time
919  * the actual wakeups are performed in wake_up_q().
920  *
921  * One per task suffices, because there's never a need for a task to be
922  * in two wake queues simultaneously; it is forbidden to abandon a task
923  * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
924  * already in a wake queue, the wakeup will happen soon and the second
925  * waker can just skip it.
926  *
927  * The WAKE_Q macro declares and initializes the list head.
928  * wake_up_q() does NOT reinitialize the list; it's expected to be
929  * called near the end of a function, where the fact that the queue is
930  * not used again will be easy to see by inspection.
931  *
932  * Note that this can cause spurious wakeups. schedule() callers
933  * must ensure the call is done inside a loop, confirming that the
934  * wakeup condition has in fact occurred.
935  */
936 struct wake_q_node {
937 	struct wake_q_node *next;
938 };
939 
940 struct wake_q_head {
941 	struct wake_q_node *first;
942 	struct wake_q_node **lastp;
943 };
944 
945 #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
946 
947 #define WAKE_Q(name)					\
948 	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
949 
950 extern void wake_q_add(struct wake_q_head *head,
951 		       struct task_struct *task);
952 extern void wake_up_q(struct wake_q_head *head);
953 
954 /*
955  * sched-domains (multiprocessor balancing) declarations:
956  */
957 #ifdef CONFIG_SMP
958 #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
959 #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
960 #define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
961 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
962 #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
963 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
964 #define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu power */
965 #define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
966 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
967 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
968 #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
969 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
970 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
971 #define SD_NUMA			0x4000	/* cross-node balancing */
972 
973 #ifdef CONFIG_SCHED_SMT
974 static inline int cpu_smt_flags(void)
975 {
976 	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
977 }
978 #endif
979 
980 #ifdef CONFIG_SCHED_MC
981 static inline int cpu_core_flags(void)
982 {
983 	return SD_SHARE_PKG_RESOURCES;
984 }
985 #endif
986 
987 #ifdef CONFIG_NUMA
988 static inline int cpu_numa_flags(void)
989 {
990 	return SD_NUMA;
991 }
992 #endif
993 
994 struct sched_domain_attr {
995 	int relax_domain_level;
996 };
997 
998 #define SD_ATTR_INIT	(struct sched_domain_attr) {	\
999 	.relax_domain_level = -1,			\
1000 }
1001 
1002 extern int sched_domain_level_max;
1003 
1004 struct sched_group;
1005 
1006 struct sched_domain {
1007 	/* These fields must be setup */
1008 	struct sched_domain *parent;	/* top domain must be null terminated */
1009 	struct sched_domain *child;	/* bottom domain must be null terminated */
1010 	struct sched_group *groups;	/* the balancing groups of the domain */
1011 	unsigned long min_interval;	/* Minimum balance interval ms */
1012 	unsigned long max_interval;	/* Maximum balance interval ms */
1013 	unsigned int busy_factor;	/* less balancing by factor if busy */
1014 	unsigned int imbalance_pct;	/* No balance until over watermark */
1015 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
1016 	unsigned int busy_idx;
1017 	unsigned int idle_idx;
1018 	unsigned int newidle_idx;
1019 	unsigned int wake_idx;
1020 	unsigned int forkexec_idx;
1021 	unsigned int smt_gain;
1022 
1023 	int nohz_idle;			/* NOHZ IDLE status */
1024 	int flags;			/* See SD_* */
1025 	int level;
1026 
1027 	/* Runtime fields. */
1028 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
1029 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
1030 	unsigned int nr_balance_failed; /* initialise to 0 */
1031 
1032 	/* idle_balance() stats */
1033 	u64 max_newidle_lb_cost;
1034 	unsigned long next_decay_max_lb_cost;
1035 
1036 #ifdef CONFIG_SCHEDSTATS
1037 	/* load_balance() stats */
1038 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
1039 	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
1040 	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
1041 	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
1042 	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
1043 	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
1044 	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
1045 	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
1046 
1047 	/* Active load balancing */
1048 	unsigned int alb_count;
1049 	unsigned int alb_failed;
1050 	unsigned int alb_pushed;
1051 
1052 	/* SD_BALANCE_EXEC stats */
1053 	unsigned int sbe_count;
1054 	unsigned int sbe_balanced;
1055 	unsigned int sbe_pushed;
1056 
1057 	/* SD_BALANCE_FORK stats */
1058 	unsigned int sbf_count;
1059 	unsigned int sbf_balanced;
1060 	unsigned int sbf_pushed;
1061 
1062 	/* try_to_wake_up() stats */
1063 	unsigned int ttwu_wake_remote;
1064 	unsigned int ttwu_move_affine;
1065 	unsigned int ttwu_move_balance;
1066 #endif
1067 #ifdef CONFIG_SCHED_DEBUG
1068 	char *name;
1069 #endif
1070 	union {
1071 		void *private;		/* used during construction */
1072 		struct rcu_head rcu;	/* used during destruction */
1073 	};
1074 
1075 	unsigned int span_weight;
1076 	/*
1077 	 * Span of all CPUs in this domain.
1078 	 *
1079 	 * NOTE: this field is variable length. (Allocated dynamically
1080 	 * by attaching extra space to the end of the structure,
1081 	 * depending on how many CPUs the kernel has booted up with)
1082 	 */
1083 	unsigned long span[0];
1084 };
1085 
1086 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1087 {
1088 	return to_cpumask(sd->span);
1089 }
1090 
1091 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1092 				    struct sched_domain_attr *dattr_new);
1093 
1094 /* Allocate an array of sched domains, for partition_sched_domains(). */
1095 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1096 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1097 
1098 bool cpus_share_cache(int this_cpu, int that_cpu);
1099 
1100 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1101 typedef int (*sched_domain_flags_f)(void);
1102 
1103 #define SDTL_OVERLAP	0x01
1104 
1105 struct sd_data {
1106 	struct sched_domain **__percpu sd;
1107 	struct sched_group **__percpu sg;
1108 	struct sched_group_capacity **__percpu sgc;
1109 };
1110 
1111 struct sched_domain_topology_level {
1112 	sched_domain_mask_f mask;
1113 	sched_domain_flags_f sd_flags;
1114 	int		    flags;
1115 	int		    numa_level;
1116 	struct sd_data      data;
1117 #ifdef CONFIG_SCHED_DEBUG
1118 	char                *name;
1119 #endif
1120 };
1121 
1122 extern struct sched_domain_topology_level *sched_domain_topology;
1123 
1124 extern void set_sched_topology(struct sched_domain_topology_level *tl);
1125 extern void wake_up_if_idle(int cpu);
1126 
1127 #ifdef CONFIG_SCHED_DEBUG
1128 # define SD_INIT_NAME(type)		.name = #type
1129 #else
1130 # define SD_INIT_NAME(type)
1131 #endif
1132 
1133 #else /* CONFIG_SMP */
1134 
1135 struct sched_domain_attr;
1136 
1137 static inline void
1138 partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1139 			struct sched_domain_attr *dattr_new)
1140 {
1141 }
1142 
1143 static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1144 {
1145 	return true;
1146 }
1147 
1148 #endif	/* !CONFIG_SMP */
1149 
1150 
1151 struct io_context;			/* See blkdev.h */
1152 
1153 
1154 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1155 extern void prefetch_stack(struct task_struct *t);
1156 #else
1157 static inline void prefetch_stack(struct task_struct *t) { }
1158 #endif
1159 
1160 struct audit_context;		/* See audit.c */
1161 struct mempolicy;
1162 struct pipe_inode_info;
1163 struct uts_namespace;
1164 
1165 struct load_weight {
1166 	unsigned long weight;
1167 	u32 inv_weight;
1168 };
1169 
1170 struct sched_avg {
1171 	u64 last_runnable_update;
1172 	s64 decay_count;
1173 	/*
1174 	 * utilization_avg_contrib describes the amount of time that a
1175 	 * sched_entity is running on a CPU. It is based on running_avg_sum
1176 	 * and is scaled in the range [0..SCHED_LOAD_SCALE].
1177 	 * load_avg_contrib described the amount of time that a sched_entity
1178 	 * is runnable on a rq. It is based on both runnable_avg_sum and the
1179 	 * weight of the task.
1180 	 */
1181 	unsigned long load_avg_contrib, utilization_avg_contrib;
1182 	/*
1183 	 * These sums represent an infinite geometric series and so are bound
1184 	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
1185 	 * choices of y < 1-2^(-32)*1024.
1186 	 * running_avg_sum reflects the time that the sched_entity is
1187 	 * effectively running on the CPU.
1188 	 * runnable_avg_sum represents the amount of time a sched_entity is on
1189 	 * a runqueue which includes the running time that is monitored by
1190 	 * running_avg_sum.
1191 	 */
1192 	u32 runnable_avg_sum, avg_period, running_avg_sum;
1193 };
1194 
1195 #ifdef CONFIG_SCHEDSTATS
1196 struct sched_statistics {
1197 	u64			wait_start;
1198 	u64			wait_max;
1199 	u64			wait_count;
1200 	u64			wait_sum;
1201 	u64			iowait_count;
1202 	u64			iowait_sum;
1203 
1204 	u64			sleep_start;
1205 	u64			sleep_max;
1206 	s64			sum_sleep_runtime;
1207 
1208 	u64			block_start;
1209 	u64			block_max;
1210 	u64			exec_max;
1211 	u64			slice_max;
1212 
1213 	u64			nr_migrations_cold;
1214 	u64			nr_failed_migrations_affine;
1215 	u64			nr_failed_migrations_running;
1216 	u64			nr_failed_migrations_hot;
1217 	u64			nr_forced_migrations;
1218 
1219 	u64			nr_wakeups;
1220 	u64			nr_wakeups_sync;
1221 	u64			nr_wakeups_migrate;
1222 	u64			nr_wakeups_local;
1223 	u64			nr_wakeups_remote;
1224 	u64			nr_wakeups_affine;
1225 	u64			nr_wakeups_affine_attempts;
1226 	u64			nr_wakeups_passive;
1227 	u64			nr_wakeups_idle;
1228 };
1229 #endif
1230 
1231 struct sched_entity {
1232 	struct load_weight	load;		/* for load-balancing */
1233 	struct rb_node		run_node;
1234 	struct list_head	group_node;
1235 	unsigned int		on_rq;
1236 
1237 	u64			exec_start;
1238 	u64			sum_exec_runtime;
1239 	u64			vruntime;
1240 	u64			prev_sum_exec_runtime;
1241 
1242 	u64			nr_migrations;
1243 
1244 #ifdef CONFIG_SCHEDSTATS
1245 	struct sched_statistics statistics;
1246 #endif
1247 
1248 #ifdef CONFIG_FAIR_GROUP_SCHED
1249 	int			depth;
1250 	struct sched_entity	*parent;
1251 	/* rq on which this entity is (to be) queued: */
1252 	struct cfs_rq		*cfs_rq;
1253 	/* rq "owned" by this entity/group: */
1254 	struct cfs_rq		*my_q;
1255 #endif
1256 
1257 #ifdef CONFIG_SMP
1258 	/* Per-entity load-tracking */
1259 	struct sched_avg	avg;
1260 #endif
1261 };
1262 
1263 struct sched_rt_entity {
1264 	struct list_head run_list;
1265 	unsigned long timeout;
1266 	unsigned long watchdog_stamp;
1267 	unsigned int time_slice;
1268 
1269 	struct sched_rt_entity *back;
1270 #ifdef CONFIG_RT_GROUP_SCHED
1271 	struct sched_rt_entity	*parent;
1272 	/* rq on which this entity is (to be) queued: */
1273 	struct rt_rq		*rt_rq;
1274 	/* rq "owned" by this entity/group: */
1275 	struct rt_rq		*my_q;
1276 #endif
1277 };
1278 
1279 struct sched_dl_entity {
1280 	struct rb_node	rb_node;
1281 
1282 	/*
1283 	 * Original scheduling parameters. Copied here from sched_attr
1284 	 * during sched_setattr(), they will remain the same until
1285 	 * the next sched_setattr().
1286 	 */
1287 	u64 dl_runtime;		/* maximum runtime for each instance	*/
1288 	u64 dl_deadline;	/* relative deadline of each instance	*/
1289 	u64 dl_period;		/* separation of two instances (period) */
1290 	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
1291 
1292 	/*
1293 	 * Actual scheduling parameters. Initialized with the values above,
1294 	 * they are continously updated during task execution. Note that
1295 	 * the remaining runtime could be < 0 in case we are in overrun.
1296 	 */
1297 	s64 runtime;		/* remaining runtime for this instance	*/
1298 	u64 deadline;		/* absolute deadline for this instance	*/
1299 	unsigned int flags;	/* specifying the scheduler behaviour	*/
1300 
1301 	/*
1302 	 * Some bool flags:
1303 	 *
1304 	 * @dl_throttled tells if we exhausted the runtime. If so, the
1305 	 * task has to wait for a replenishment to be performed at the
1306 	 * next firing of dl_timer.
1307 	 *
1308 	 * @dl_new tells if a new instance arrived. If so we must
1309 	 * start executing it with full runtime and reset its absolute
1310 	 * deadline;
1311 	 *
1312 	 * @dl_boosted tells if we are boosted due to DI. If so we are
1313 	 * outside bandwidth enforcement mechanism (but only until we
1314 	 * exit the critical section);
1315 	 *
1316 	 * @dl_yielded tells if task gave up the cpu before consuming
1317 	 * all its available runtime during the last job.
1318 	 */
1319 	int dl_throttled, dl_new, dl_boosted, dl_yielded;
1320 
1321 	/*
1322 	 * Bandwidth enforcement timer. Each -deadline task has its
1323 	 * own bandwidth to be enforced, thus we need one timer per task.
1324 	 */
1325 	struct hrtimer dl_timer;
1326 };
1327 
1328 union rcu_special {
1329 	struct {
1330 		bool blocked;
1331 		bool need_qs;
1332 	} b;
1333 	short s;
1334 };
1335 struct rcu_node;
1336 
1337 enum perf_event_task_context {
1338 	perf_invalid_context = -1,
1339 	perf_hw_context = 0,
1340 	perf_sw_context,
1341 	perf_nr_task_contexts,
1342 };
1343 
1344 struct task_struct {
1345 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1346 	void *stack;
1347 	atomic_t usage;
1348 	unsigned int flags;	/* per process flags, defined below */
1349 	unsigned int ptrace;
1350 
1351 #ifdef CONFIG_SMP
1352 	struct llist_node wake_entry;
1353 	int on_cpu;
1354 	struct task_struct *last_wakee;
1355 	unsigned long wakee_flips;
1356 	unsigned long wakee_flip_decay_ts;
1357 
1358 	int wake_cpu;
1359 #endif
1360 	int on_rq;
1361 
1362 	int prio, static_prio, normal_prio;
1363 	unsigned int rt_priority;
1364 	const struct sched_class *sched_class;
1365 	struct sched_entity se;
1366 	struct sched_rt_entity rt;
1367 #ifdef CONFIG_CGROUP_SCHED
1368 	struct task_group *sched_task_group;
1369 #endif
1370 	struct sched_dl_entity dl;
1371 
1372 #ifdef CONFIG_PREEMPT_NOTIFIERS
1373 	/* list of struct preempt_notifier: */
1374 	struct hlist_head preempt_notifiers;
1375 #endif
1376 
1377 #ifdef CONFIG_BLK_DEV_IO_TRACE
1378 	unsigned int btrace_seq;
1379 #endif
1380 
1381 	unsigned int policy;
1382 	int nr_cpus_allowed;
1383 	cpumask_t cpus_allowed;
1384 
1385 #ifdef CONFIG_PREEMPT_RCU
1386 	int rcu_read_lock_nesting;
1387 	union rcu_special rcu_read_unlock_special;
1388 	struct list_head rcu_node_entry;
1389 	struct rcu_node *rcu_blocked_node;
1390 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1391 #ifdef CONFIG_TASKS_RCU
1392 	unsigned long rcu_tasks_nvcsw;
1393 	bool rcu_tasks_holdout;
1394 	struct list_head rcu_tasks_holdout_list;
1395 	int rcu_tasks_idle_cpu;
1396 #endif /* #ifdef CONFIG_TASKS_RCU */
1397 
1398 #ifdef CONFIG_SCHED_INFO
1399 	struct sched_info sched_info;
1400 #endif
1401 
1402 	struct list_head tasks;
1403 #ifdef CONFIG_SMP
1404 	struct plist_node pushable_tasks;
1405 	struct rb_node pushable_dl_tasks;
1406 #endif
1407 
1408 	struct mm_struct *mm, *active_mm;
1409 	/* per-thread vma caching */
1410 	u32 vmacache_seqnum;
1411 	struct vm_area_struct *vmacache[VMACACHE_SIZE];
1412 #if defined(SPLIT_RSS_COUNTING)
1413 	struct task_rss_stat	rss_stat;
1414 #endif
1415 /* task state */
1416 	int exit_state;
1417 	int exit_code, exit_signal;
1418 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1419 	unsigned long jobctl;	/* JOBCTL_*, siglock protected */
1420 
1421 	/* Used for emulating ABI behavior of previous Linux versions */
1422 	unsigned int personality;
1423 
1424 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1425 				 * execve */
1426 	unsigned in_iowait:1;
1427 
1428 	/* Revert to default priority/policy when forking */
1429 	unsigned sched_reset_on_fork:1;
1430 	unsigned sched_contributes_to_load:1;
1431 	unsigned sched_migrated:1;
1432 
1433 #ifdef CONFIG_MEMCG_KMEM
1434 	unsigned memcg_kmem_skip_account:1;
1435 #endif
1436 #ifdef CONFIG_COMPAT_BRK
1437 	unsigned brk_randomized:1;
1438 #endif
1439 
1440 	unsigned long atomic_flags; /* Flags needing atomic access. */
1441 
1442 	struct restart_block restart_block;
1443 
1444 	pid_t pid;
1445 	pid_t tgid;
1446 
1447 #ifdef CONFIG_CC_STACKPROTECTOR
1448 	/* Canary value for the -fstack-protector gcc feature */
1449 	unsigned long stack_canary;
1450 #endif
1451 	/*
1452 	 * pointers to (original) parent process, youngest child, younger sibling,
1453 	 * older sibling, respectively.  (p->father can be replaced with
1454 	 * p->real_parent->pid)
1455 	 */
1456 	struct task_struct __rcu *real_parent; /* real parent process */
1457 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1458 	/*
1459 	 * children/sibling forms the list of my natural children
1460 	 */
1461 	struct list_head children;	/* list of my children */
1462 	struct list_head sibling;	/* linkage in my parent's children list */
1463 	struct task_struct *group_leader;	/* threadgroup leader */
1464 
1465 	/*
1466 	 * ptraced is the list of tasks this task is using ptrace on.
1467 	 * This includes both natural children and PTRACE_ATTACH targets.
1468 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1469 	 */
1470 	struct list_head ptraced;
1471 	struct list_head ptrace_entry;
1472 
1473 	/* PID/PID hash table linkage. */
1474 	struct pid_link pids[PIDTYPE_MAX];
1475 	struct list_head thread_group;
1476 	struct list_head thread_node;
1477 
1478 	struct completion *vfork_done;		/* for vfork() */
1479 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
1480 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
1481 
1482 	cputime_t utime, stime, utimescaled, stimescaled;
1483 	cputime_t gtime;
1484 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1485 	struct cputime prev_cputime;
1486 #endif
1487 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1488 	seqlock_t vtime_seqlock;
1489 	unsigned long long vtime_snap;
1490 	enum {
1491 		VTIME_SLEEPING = 0,
1492 		VTIME_USER,
1493 		VTIME_SYS,
1494 	} vtime_snap_whence;
1495 #endif
1496 	unsigned long nvcsw, nivcsw; /* context switch counts */
1497 	u64 start_time;		/* monotonic time in nsec */
1498 	u64 real_start_time;	/* boot based time in nsec */
1499 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1500 	unsigned long min_flt, maj_flt;
1501 
1502 	struct task_cputime cputime_expires;
1503 	struct list_head cpu_timers[3];
1504 
1505 /* process credentials */
1506 	const struct cred __rcu *real_cred; /* objective and real subjective task
1507 					 * credentials (COW) */
1508 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
1509 					 * credentials (COW) */
1510 	char comm[TASK_COMM_LEN]; /* executable name excluding path
1511 				     - access with [gs]et_task_comm (which lock
1512 				       it with task_lock())
1513 				     - initialized normally by setup_new_exec */
1514 /* file system info */
1515 	struct nameidata *nameidata;
1516 #ifdef CONFIG_SYSVIPC
1517 /* ipc stuff */
1518 	struct sysv_sem sysvsem;
1519 	struct sysv_shm sysvshm;
1520 #endif
1521 #ifdef CONFIG_DETECT_HUNG_TASK
1522 /* hung task detection */
1523 	unsigned long last_switch_count;
1524 #endif
1525 /* CPU-specific state of this task */
1526 	struct thread_struct thread;
1527 /* filesystem information */
1528 	struct fs_struct *fs;
1529 /* open file information */
1530 	struct files_struct *files;
1531 /* namespaces */
1532 	struct nsproxy *nsproxy;
1533 /* signal handlers */
1534 	struct signal_struct *signal;
1535 	struct sighand_struct *sighand;
1536 
1537 	sigset_t blocked, real_blocked;
1538 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
1539 	struct sigpending pending;
1540 
1541 	unsigned long sas_ss_sp;
1542 	size_t sas_ss_size;
1543 	int (*notifier)(void *priv);
1544 	void *notifier_data;
1545 	sigset_t *notifier_mask;
1546 	struct callback_head *task_works;
1547 
1548 	struct audit_context *audit_context;
1549 #ifdef CONFIG_AUDITSYSCALL
1550 	kuid_t loginuid;
1551 	unsigned int sessionid;
1552 #endif
1553 	struct seccomp seccomp;
1554 
1555 /* Thread group tracking */
1556    	u32 parent_exec_id;
1557    	u32 self_exec_id;
1558 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1559  * mempolicy */
1560 	spinlock_t alloc_lock;
1561 
1562 	/* Protection of the PI data structures: */
1563 	raw_spinlock_t pi_lock;
1564 
1565 	struct wake_q_node wake_q;
1566 
1567 #ifdef CONFIG_RT_MUTEXES
1568 	/* PI waiters blocked on a rt_mutex held by this task */
1569 	struct rb_root pi_waiters;
1570 	struct rb_node *pi_waiters_leftmost;
1571 	/* Deadlock detection and priority inheritance handling */
1572 	struct rt_mutex_waiter *pi_blocked_on;
1573 #endif
1574 
1575 #ifdef CONFIG_DEBUG_MUTEXES
1576 	/* mutex deadlock detection */
1577 	struct mutex_waiter *blocked_on;
1578 #endif
1579 #ifdef CONFIG_TRACE_IRQFLAGS
1580 	unsigned int irq_events;
1581 	unsigned long hardirq_enable_ip;
1582 	unsigned long hardirq_disable_ip;
1583 	unsigned int hardirq_enable_event;
1584 	unsigned int hardirq_disable_event;
1585 	int hardirqs_enabled;
1586 	int hardirq_context;
1587 	unsigned long softirq_disable_ip;
1588 	unsigned long softirq_enable_ip;
1589 	unsigned int softirq_disable_event;
1590 	unsigned int softirq_enable_event;
1591 	int softirqs_enabled;
1592 	int softirq_context;
1593 #endif
1594 #ifdef CONFIG_LOCKDEP
1595 # define MAX_LOCK_DEPTH 48UL
1596 	u64 curr_chain_key;
1597 	int lockdep_depth;
1598 	unsigned int lockdep_recursion;
1599 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1600 	gfp_t lockdep_reclaim_gfp;
1601 #endif
1602 
1603 /* journalling filesystem info */
1604 	void *journal_info;
1605 
1606 /* stacked block device info */
1607 	struct bio_list *bio_list;
1608 
1609 #ifdef CONFIG_BLOCK
1610 /* stack plugging */
1611 	struct blk_plug *plug;
1612 #endif
1613 
1614 /* VM state */
1615 	struct reclaim_state *reclaim_state;
1616 
1617 	struct backing_dev_info *backing_dev_info;
1618 
1619 	struct io_context *io_context;
1620 
1621 	unsigned long ptrace_message;
1622 	siginfo_t *last_siginfo; /* For ptrace use.  */
1623 	struct task_io_accounting ioac;
1624 #if defined(CONFIG_TASK_XACCT)
1625 	u64 acct_rss_mem1;	/* accumulated rss usage */
1626 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1627 	cputime_t acct_timexpd;	/* stime + utime since last update */
1628 #endif
1629 #ifdef CONFIG_CPUSETS
1630 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1631 	seqcount_t mems_allowed_seq;	/* Seqence no to catch updates */
1632 	int cpuset_mem_spread_rotor;
1633 	int cpuset_slab_spread_rotor;
1634 #endif
1635 #ifdef CONFIG_CGROUPS
1636 	/* Control Group info protected by css_set_lock */
1637 	struct css_set __rcu *cgroups;
1638 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1639 	struct list_head cg_list;
1640 #endif
1641 #ifdef CONFIG_FUTEX
1642 	struct robust_list_head __user *robust_list;
1643 #ifdef CONFIG_COMPAT
1644 	struct compat_robust_list_head __user *compat_robust_list;
1645 #endif
1646 	struct list_head pi_state_list;
1647 	struct futex_pi_state *pi_state_cache;
1648 #endif
1649 #ifdef CONFIG_PERF_EVENTS
1650 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1651 	struct mutex perf_event_mutex;
1652 	struct list_head perf_event_list;
1653 #endif
1654 #ifdef CONFIG_DEBUG_PREEMPT
1655 	unsigned long preempt_disable_ip;
1656 #endif
1657 #ifdef CONFIG_NUMA
1658 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1659 	short il_next;
1660 	short pref_node_fork;
1661 #endif
1662 #ifdef CONFIG_NUMA_BALANCING
1663 	int numa_scan_seq;
1664 	unsigned int numa_scan_period;
1665 	unsigned int numa_scan_period_max;
1666 	int numa_preferred_nid;
1667 	unsigned long numa_migrate_retry;
1668 	u64 node_stamp;			/* migration stamp  */
1669 	u64 last_task_numa_placement;
1670 	u64 last_sum_exec_runtime;
1671 	struct callback_head numa_work;
1672 
1673 	struct list_head numa_entry;
1674 	struct numa_group *numa_group;
1675 
1676 	/*
1677 	 * numa_faults is an array split into four regions:
1678 	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1679 	 * in this precise order.
1680 	 *
1681 	 * faults_memory: Exponential decaying average of faults on a per-node
1682 	 * basis. Scheduling placement decisions are made based on these
1683 	 * counts. The values remain static for the duration of a PTE scan.
1684 	 * faults_cpu: Track the nodes the process was running on when a NUMA
1685 	 * hinting fault was incurred.
1686 	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1687 	 * during the current scan window. When the scan completes, the counts
1688 	 * in faults_memory and faults_cpu decay and these values are copied.
1689 	 */
1690 	unsigned long *numa_faults;
1691 	unsigned long total_numa_faults;
1692 
1693 	/*
1694 	 * numa_faults_locality tracks if faults recorded during the last
1695 	 * scan window were remote/local or failed to migrate. The task scan
1696 	 * period is adapted based on the locality of the faults with different
1697 	 * weights depending on whether they were shared or private faults
1698 	 */
1699 	unsigned long numa_faults_locality[3];
1700 
1701 	unsigned long numa_pages_migrated;
1702 #endif /* CONFIG_NUMA_BALANCING */
1703 
1704 	struct rcu_head rcu;
1705 
1706 	/*
1707 	 * cache last used pipe for splice
1708 	 */
1709 	struct pipe_inode_info *splice_pipe;
1710 
1711 	struct page_frag task_frag;
1712 
1713 #ifdef	CONFIG_TASK_DELAY_ACCT
1714 	struct task_delay_info *delays;
1715 #endif
1716 #ifdef CONFIG_FAULT_INJECTION
1717 	int make_it_fail;
1718 #endif
1719 	/*
1720 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1721 	 * balance_dirty_pages() for some dirty throttling pause
1722 	 */
1723 	int nr_dirtied;
1724 	int nr_dirtied_pause;
1725 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
1726 
1727 #ifdef CONFIG_LATENCYTOP
1728 	int latency_record_count;
1729 	struct latency_record latency_record[LT_SAVECOUNT];
1730 #endif
1731 	/*
1732 	 * time slack values; these are used to round up poll() and
1733 	 * select() etc timeout values. These are in nanoseconds.
1734 	 */
1735 	unsigned long timer_slack_ns;
1736 	unsigned long default_timer_slack_ns;
1737 
1738 #ifdef CONFIG_KASAN
1739 	unsigned int kasan_depth;
1740 #endif
1741 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1742 	/* Index of current stored address in ret_stack */
1743 	int curr_ret_stack;
1744 	/* Stack of return addresses for return function tracing */
1745 	struct ftrace_ret_stack	*ret_stack;
1746 	/* time stamp for last schedule */
1747 	unsigned long long ftrace_timestamp;
1748 	/*
1749 	 * Number of functions that haven't been traced
1750 	 * because of depth overrun.
1751 	 */
1752 	atomic_t trace_overrun;
1753 	/* Pause for the tracing */
1754 	atomic_t tracing_graph_pause;
1755 #endif
1756 #ifdef CONFIG_TRACING
1757 	/* state flags for use by tracers */
1758 	unsigned long trace;
1759 	/* bitmask and counter of trace recursion */
1760 	unsigned long trace_recursion;
1761 #endif /* CONFIG_TRACING */
1762 #ifdef CONFIG_MEMCG
1763 	struct memcg_oom_info {
1764 		struct mem_cgroup *memcg;
1765 		gfp_t gfp_mask;
1766 		int order;
1767 		unsigned int may_oom:1;
1768 	} memcg_oom;
1769 #endif
1770 #ifdef CONFIG_UPROBES
1771 	struct uprobe_task *utask;
1772 #endif
1773 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1774 	unsigned int	sequential_io;
1775 	unsigned int	sequential_io_avg;
1776 #endif
1777 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1778 	unsigned long	task_state_change;
1779 #endif
1780 	int pagefault_disabled;
1781 };
1782 
1783 /* Future-safe accessor for struct task_struct's cpus_allowed. */
1784 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1785 
1786 #define TNF_MIGRATED	0x01
1787 #define TNF_NO_GROUP	0x02
1788 #define TNF_SHARED	0x04
1789 #define TNF_FAULT_LOCAL	0x08
1790 #define TNF_MIGRATE_FAIL 0x10
1791 
1792 #ifdef CONFIG_NUMA_BALANCING
1793 extern void task_numa_fault(int last_node, int node, int pages, int flags);
1794 extern pid_t task_numa_group_id(struct task_struct *p);
1795 extern void set_numabalancing_state(bool enabled);
1796 extern void task_numa_free(struct task_struct *p);
1797 extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1798 					int src_nid, int dst_cpu);
1799 #else
1800 static inline void task_numa_fault(int last_node, int node, int pages,
1801 				   int flags)
1802 {
1803 }
1804 static inline pid_t task_numa_group_id(struct task_struct *p)
1805 {
1806 	return 0;
1807 }
1808 static inline void set_numabalancing_state(bool enabled)
1809 {
1810 }
1811 static inline void task_numa_free(struct task_struct *p)
1812 {
1813 }
1814 static inline bool should_numa_migrate_memory(struct task_struct *p,
1815 				struct page *page, int src_nid, int dst_cpu)
1816 {
1817 	return true;
1818 }
1819 #endif
1820 
1821 static inline struct pid *task_pid(struct task_struct *task)
1822 {
1823 	return task->pids[PIDTYPE_PID].pid;
1824 }
1825 
1826 static inline struct pid *task_tgid(struct task_struct *task)
1827 {
1828 	return task->group_leader->pids[PIDTYPE_PID].pid;
1829 }
1830 
1831 /*
1832  * Without tasklist or rcu lock it is not safe to dereference
1833  * the result of task_pgrp/task_session even if task == current,
1834  * we can race with another thread doing sys_setsid/sys_setpgid.
1835  */
1836 static inline struct pid *task_pgrp(struct task_struct *task)
1837 {
1838 	return task->group_leader->pids[PIDTYPE_PGID].pid;
1839 }
1840 
1841 static inline struct pid *task_session(struct task_struct *task)
1842 {
1843 	return task->group_leader->pids[PIDTYPE_SID].pid;
1844 }
1845 
1846 struct pid_namespace;
1847 
1848 /*
1849  * the helpers to get the task's different pids as they are seen
1850  * from various namespaces
1851  *
1852  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1853  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1854  *                     current.
1855  * task_xid_nr_ns()  : id seen from the ns specified;
1856  *
1857  * set_task_vxid()   : assigns a virtual id to a task;
1858  *
1859  * see also pid_nr() etc in include/linux/pid.h
1860  */
1861 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1862 			struct pid_namespace *ns);
1863 
1864 static inline pid_t task_pid_nr(struct task_struct *tsk)
1865 {
1866 	return tsk->pid;
1867 }
1868 
1869 static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1870 					struct pid_namespace *ns)
1871 {
1872 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1873 }
1874 
1875 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1876 {
1877 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1878 }
1879 
1880 
1881 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1882 {
1883 	return tsk->tgid;
1884 }
1885 
1886 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1887 
1888 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1889 {
1890 	return pid_vnr(task_tgid(tsk));
1891 }
1892 
1893 
1894 static inline int pid_alive(const struct task_struct *p);
1895 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1896 {
1897 	pid_t pid = 0;
1898 
1899 	rcu_read_lock();
1900 	if (pid_alive(tsk))
1901 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1902 	rcu_read_unlock();
1903 
1904 	return pid;
1905 }
1906 
1907 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1908 {
1909 	return task_ppid_nr_ns(tsk, &init_pid_ns);
1910 }
1911 
1912 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1913 					struct pid_namespace *ns)
1914 {
1915 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1916 }
1917 
1918 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1919 {
1920 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1921 }
1922 
1923 
1924 static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1925 					struct pid_namespace *ns)
1926 {
1927 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1928 }
1929 
1930 static inline pid_t task_session_vnr(struct task_struct *tsk)
1931 {
1932 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1933 }
1934 
1935 /* obsolete, do not use */
1936 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1937 {
1938 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1939 }
1940 
1941 /**
1942  * pid_alive - check that a task structure is not stale
1943  * @p: Task structure to be checked.
1944  *
1945  * Test if a process is not yet dead (at most zombie state)
1946  * If pid_alive fails, then pointers within the task structure
1947  * can be stale and must not be dereferenced.
1948  *
1949  * Return: 1 if the process is alive. 0 otherwise.
1950  */
1951 static inline int pid_alive(const struct task_struct *p)
1952 {
1953 	return p->pids[PIDTYPE_PID].pid != NULL;
1954 }
1955 
1956 /**
1957  * is_global_init - check if a task structure is init
1958  * @tsk: Task structure to be checked.
1959  *
1960  * Check if a task structure is the first user space task the kernel created.
1961  *
1962  * Return: 1 if the task structure is init. 0 otherwise.
1963  */
1964 static inline int is_global_init(struct task_struct *tsk)
1965 {
1966 	return tsk->pid == 1;
1967 }
1968 
1969 extern struct pid *cad_pid;
1970 
1971 extern void free_task(struct task_struct *tsk);
1972 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1973 
1974 extern void __put_task_struct(struct task_struct *t);
1975 
1976 static inline void put_task_struct(struct task_struct *t)
1977 {
1978 	if (atomic_dec_and_test(&t->usage))
1979 		__put_task_struct(t);
1980 }
1981 
1982 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1983 extern void task_cputime(struct task_struct *t,
1984 			 cputime_t *utime, cputime_t *stime);
1985 extern void task_cputime_scaled(struct task_struct *t,
1986 				cputime_t *utimescaled, cputime_t *stimescaled);
1987 extern cputime_t task_gtime(struct task_struct *t);
1988 #else
1989 static inline void task_cputime(struct task_struct *t,
1990 				cputime_t *utime, cputime_t *stime)
1991 {
1992 	if (utime)
1993 		*utime = t->utime;
1994 	if (stime)
1995 		*stime = t->stime;
1996 }
1997 
1998 static inline void task_cputime_scaled(struct task_struct *t,
1999 				       cputime_t *utimescaled,
2000 				       cputime_t *stimescaled)
2001 {
2002 	if (utimescaled)
2003 		*utimescaled = t->utimescaled;
2004 	if (stimescaled)
2005 		*stimescaled = t->stimescaled;
2006 }
2007 
2008 static inline cputime_t task_gtime(struct task_struct *t)
2009 {
2010 	return t->gtime;
2011 }
2012 #endif
2013 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2014 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2015 
2016 /*
2017  * Per process flags
2018  */
2019 #define PF_EXITING	0x00000004	/* getting shut down */
2020 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
2021 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
2022 #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
2023 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
2024 #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
2025 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
2026 #define PF_DUMPCORE	0x00000200	/* dumped core */
2027 #define PF_SIGNALED	0x00000400	/* killed by a signal */
2028 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
2029 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
2030 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
2031 #define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
2032 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
2033 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
2034 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
2035 #define PF_KSWAPD	0x00040000	/* I am kswapd */
2036 #define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
2037 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
2038 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
2039 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
2040 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
2041 #define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
2042 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
2043 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
2044 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
2045 #define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
2046 
2047 /*
2048  * Only the _current_ task can read/write to tsk->flags, but other
2049  * tasks can access tsk->flags in readonly mode for example
2050  * with tsk_used_math (like during threaded core dumping).
2051  * There is however an exception to this rule during ptrace
2052  * or during fork: the ptracer task is allowed to write to the
2053  * child->flags of its traced child (same goes for fork, the parent
2054  * can write to the child->flags), because we're guaranteed the
2055  * child is not running and in turn not changing child->flags
2056  * at the same time the parent does it.
2057  */
2058 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
2059 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
2060 #define clear_used_math() clear_stopped_child_used_math(current)
2061 #define set_used_math() set_stopped_child_used_math(current)
2062 #define conditional_stopped_child_used_math(condition, child) \
2063 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
2064 #define conditional_used_math(condition) \
2065 	conditional_stopped_child_used_math(condition, current)
2066 #define copy_to_stopped_child_used_math(child) \
2067 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
2068 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
2069 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
2070 #define used_math() tsk_used_math(current)
2071 
2072 /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
2073  * __GFP_FS is also cleared as it implies __GFP_IO.
2074  */
2075 static inline gfp_t memalloc_noio_flags(gfp_t flags)
2076 {
2077 	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
2078 		flags &= ~(__GFP_IO | __GFP_FS);
2079 	return flags;
2080 }
2081 
2082 static inline unsigned int memalloc_noio_save(void)
2083 {
2084 	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
2085 	current->flags |= PF_MEMALLOC_NOIO;
2086 	return flags;
2087 }
2088 
2089 static inline void memalloc_noio_restore(unsigned int flags)
2090 {
2091 	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
2092 }
2093 
2094 /* Per-process atomic flags. */
2095 #define PFA_NO_NEW_PRIVS 0	/* May not gain new privileges. */
2096 #define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
2097 #define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
2098 
2099 
2100 #define TASK_PFA_TEST(name, func)					\
2101 	static inline bool task_##func(struct task_struct *p)		\
2102 	{ return test_bit(PFA_##name, &p->atomic_flags); }
2103 #define TASK_PFA_SET(name, func)					\
2104 	static inline void task_set_##func(struct task_struct *p)	\
2105 	{ set_bit(PFA_##name, &p->atomic_flags); }
2106 #define TASK_PFA_CLEAR(name, func)					\
2107 	static inline void task_clear_##func(struct task_struct *p)	\
2108 	{ clear_bit(PFA_##name, &p->atomic_flags); }
2109 
2110 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
2111 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
2112 
2113 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
2114 TASK_PFA_SET(SPREAD_PAGE, spread_page)
2115 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
2116 
2117 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2118 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2119 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
2120 
2121 /*
2122  * task->jobctl flags
2123  */
2124 #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
2125 
2126 #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
2127 #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
2128 #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
2129 #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
2130 #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
2131 #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
2132 #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
2133 
2134 #define JOBCTL_STOP_DEQUEUED	(1UL << JOBCTL_STOP_DEQUEUED_BIT)
2135 #define JOBCTL_STOP_PENDING	(1UL << JOBCTL_STOP_PENDING_BIT)
2136 #define JOBCTL_STOP_CONSUME	(1UL << JOBCTL_STOP_CONSUME_BIT)
2137 #define JOBCTL_TRAP_STOP	(1UL << JOBCTL_TRAP_STOP_BIT)
2138 #define JOBCTL_TRAP_NOTIFY	(1UL << JOBCTL_TRAP_NOTIFY_BIT)
2139 #define JOBCTL_TRAPPING		(1UL << JOBCTL_TRAPPING_BIT)
2140 #define JOBCTL_LISTENING	(1UL << JOBCTL_LISTENING_BIT)
2141 
2142 #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
2143 #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
2144 
2145 extern bool task_set_jobctl_pending(struct task_struct *task,
2146 				    unsigned long mask);
2147 extern void task_clear_jobctl_trapping(struct task_struct *task);
2148 extern void task_clear_jobctl_pending(struct task_struct *task,
2149 				      unsigned long mask);
2150 
2151 static inline void rcu_copy_process(struct task_struct *p)
2152 {
2153 #ifdef CONFIG_PREEMPT_RCU
2154 	p->rcu_read_lock_nesting = 0;
2155 	p->rcu_read_unlock_special.s = 0;
2156 	p->rcu_blocked_node = NULL;
2157 	INIT_LIST_HEAD(&p->rcu_node_entry);
2158 #endif /* #ifdef CONFIG_PREEMPT_RCU */
2159 #ifdef CONFIG_TASKS_RCU
2160 	p->rcu_tasks_holdout = false;
2161 	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2162 	p->rcu_tasks_idle_cpu = -1;
2163 #endif /* #ifdef CONFIG_TASKS_RCU */
2164 }
2165 
2166 static inline void tsk_restore_flags(struct task_struct *task,
2167 				unsigned long orig_flags, unsigned long flags)
2168 {
2169 	task->flags &= ~flags;
2170 	task->flags |= orig_flags & flags;
2171 }
2172 
2173 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2174 				     const struct cpumask *trial);
2175 extern int task_can_attach(struct task_struct *p,
2176 			   const struct cpumask *cs_cpus_allowed);
2177 #ifdef CONFIG_SMP
2178 extern void do_set_cpus_allowed(struct task_struct *p,
2179 			       const struct cpumask *new_mask);
2180 
2181 extern int set_cpus_allowed_ptr(struct task_struct *p,
2182 				const struct cpumask *new_mask);
2183 #else
2184 static inline void do_set_cpus_allowed(struct task_struct *p,
2185 				      const struct cpumask *new_mask)
2186 {
2187 }
2188 static inline int set_cpus_allowed_ptr(struct task_struct *p,
2189 				       const struct cpumask *new_mask)
2190 {
2191 	if (!cpumask_test_cpu(0, new_mask))
2192 		return -EINVAL;
2193 	return 0;
2194 }
2195 #endif
2196 
2197 #ifdef CONFIG_NO_HZ_COMMON
2198 void calc_load_enter_idle(void);
2199 void calc_load_exit_idle(void);
2200 #else
2201 static inline void calc_load_enter_idle(void) { }
2202 static inline void calc_load_exit_idle(void) { }
2203 #endif /* CONFIG_NO_HZ_COMMON */
2204 
2205 #ifndef CONFIG_CPUMASK_OFFSTACK
2206 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
2207 {
2208 	return set_cpus_allowed_ptr(p, &new_mask);
2209 }
2210 #endif
2211 
2212 /*
2213  * Do not use outside of architecture code which knows its limitations.
2214  *
2215  * sched_clock() has no promise of monotonicity or bounded drift between
2216  * CPUs, use (which you should not) requires disabling IRQs.
2217  *
2218  * Please use one of the three interfaces below.
2219  */
2220 extern unsigned long long notrace sched_clock(void);
2221 /*
2222  * See the comment in kernel/sched/clock.c
2223  */
2224 extern u64 cpu_clock(int cpu);
2225 extern u64 local_clock(void);
2226 extern u64 running_clock(void);
2227 extern u64 sched_clock_cpu(int cpu);
2228 
2229 
2230 extern void sched_clock_init(void);
2231 
2232 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2233 static inline void sched_clock_tick(void)
2234 {
2235 }
2236 
2237 static inline void sched_clock_idle_sleep_event(void)
2238 {
2239 }
2240 
2241 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2242 {
2243 }
2244 #else
2245 /*
2246  * Architectures can set this to 1 if they have specified
2247  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
2248  * but then during bootup it turns out that sched_clock()
2249  * is reliable after all:
2250  */
2251 extern int sched_clock_stable(void);
2252 extern void set_sched_clock_stable(void);
2253 extern void clear_sched_clock_stable(void);
2254 
2255 extern void sched_clock_tick(void);
2256 extern void sched_clock_idle_sleep_event(void);
2257 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2258 #endif
2259 
2260 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2261 /*
2262  * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
2263  * The reason for this explicit opt-in is not to have perf penalty with
2264  * slow sched_clocks.
2265  */
2266 extern void enable_sched_clock_irqtime(void);
2267 extern void disable_sched_clock_irqtime(void);
2268 #else
2269 static inline void enable_sched_clock_irqtime(void) {}
2270 static inline void disable_sched_clock_irqtime(void) {}
2271 #endif
2272 
2273 extern unsigned long long
2274 task_sched_runtime(struct task_struct *task);
2275 
2276 /* sched_exec is called by processes performing an exec */
2277 #ifdef CONFIG_SMP
2278 extern void sched_exec(void);
2279 #else
2280 #define sched_exec()   {}
2281 #endif
2282 
2283 extern void sched_clock_idle_sleep_event(void);
2284 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2285 
2286 #ifdef CONFIG_HOTPLUG_CPU
2287 extern void idle_task_exit(void);
2288 #else
2289 static inline void idle_task_exit(void) {}
2290 #endif
2291 
2292 #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2293 extern void wake_up_nohz_cpu(int cpu);
2294 #else
2295 static inline void wake_up_nohz_cpu(int cpu) { }
2296 #endif
2297 
2298 #ifdef CONFIG_NO_HZ_FULL
2299 extern bool sched_can_stop_tick(void);
2300 extern u64 scheduler_tick_max_deferment(void);
2301 #else
2302 static inline bool sched_can_stop_tick(void) { return false; }
2303 #endif
2304 
2305 #ifdef CONFIG_SCHED_AUTOGROUP
2306 extern void sched_autogroup_create_attach(struct task_struct *p);
2307 extern void sched_autogroup_detach(struct task_struct *p);
2308 extern void sched_autogroup_fork(struct signal_struct *sig);
2309 extern void sched_autogroup_exit(struct signal_struct *sig);
2310 #ifdef CONFIG_PROC_FS
2311 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2312 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2313 #endif
2314 #else
2315 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2316 static inline void sched_autogroup_detach(struct task_struct *p) { }
2317 static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2318 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2319 #endif
2320 
2321 extern int yield_to(struct task_struct *p, bool preempt);
2322 extern void set_user_nice(struct task_struct *p, long nice);
2323 extern int task_prio(const struct task_struct *p);
2324 /**
2325  * task_nice - return the nice value of a given task.
2326  * @p: the task in question.
2327  *
2328  * Return: The nice value [ -20 ... 0 ... 19 ].
2329  */
2330 static inline int task_nice(const struct task_struct *p)
2331 {
2332 	return PRIO_TO_NICE((p)->static_prio);
2333 }
2334 extern int can_nice(const struct task_struct *p, const int nice);
2335 extern int task_curr(const struct task_struct *p);
2336 extern int idle_cpu(int cpu);
2337 extern int sched_setscheduler(struct task_struct *, int,
2338 			      const struct sched_param *);
2339 extern int sched_setscheduler_nocheck(struct task_struct *, int,
2340 				      const struct sched_param *);
2341 extern int sched_setattr(struct task_struct *,
2342 			 const struct sched_attr *);
2343 extern struct task_struct *idle_task(int cpu);
2344 /**
2345  * is_idle_task - is the specified task an idle task?
2346  * @p: the task in question.
2347  *
2348  * Return: 1 if @p is an idle task. 0 otherwise.
2349  */
2350 static inline bool is_idle_task(const struct task_struct *p)
2351 {
2352 	return p->pid == 0;
2353 }
2354 extern struct task_struct *curr_task(int cpu);
2355 extern void set_curr_task(int cpu, struct task_struct *p);
2356 
2357 void yield(void);
2358 
2359 union thread_union {
2360 	struct thread_info thread_info;
2361 	unsigned long stack[THREAD_SIZE/sizeof(long)];
2362 };
2363 
2364 #ifndef __HAVE_ARCH_KSTACK_END
2365 static inline int kstack_end(void *addr)
2366 {
2367 	/* Reliable end of stack detection:
2368 	 * Some APM bios versions misalign the stack
2369 	 */
2370 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2371 }
2372 #endif
2373 
2374 extern union thread_union init_thread_union;
2375 extern struct task_struct init_task;
2376 
2377 extern struct   mm_struct init_mm;
2378 
2379 extern struct pid_namespace init_pid_ns;
2380 
2381 /*
2382  * find a task by one of its numerical ids
2383  *
2384  * find_task_by_pid_ns():
2385  *      finds a task by its pid in the specified namespace
2386  * find_task_by_vpid():
2387  *      finds a task by its virtual pid
2388  *
2389  * see also find_vpid() etc in include/linux/pid.h
2390  */
2391 
2392 extern struct task_struct *find_task_by_vpid(pid_t nr);
2393 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2394 		struct pid_namespace *ns);
2395 
2396 /* per-UID process charging. */
2397 extern struct user_struct * alloc_uid(kuid_t);
2398 static inline struct user_struct *get_uid(struct user_struct *u)
2399 {
2400 	atomic_inc(&u->__count);
2401 	return u;
2402 }
2403 extern void free_uid(struct user_struct *);
2404 
2405 #include <asm/current.h>
2406 
2407 extern void xtime_update(unsigned long ticks);
2408 
2409 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2410 extern int wake_up_process(struct task_struct *tsk);
2411 extern void wake_up_new_task(struct task_struct *tsk);
2412 #ifdef CONFIG_SMP
2413  extern void kick_process(struct task_struct *tsk);
2414 #else
2415  static inline void kick_process(struct task_struct *tsk) { }
2416 #endif
2417 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2418 extern void sched_dead(struct task_struct *p);
2419 
2420 extern void proc_caches_init(void);
2421 extern void flush_signals(struct task_struct *);
2422 extern void ignore_signals(struct task_struct *);
2423 extern void flush_signal_handlers(struct task_struct *, int force_default);
2424 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2425 
2426 static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2427 {
2428 	unsigned long flags;
2429 	int ret;
2430 
2431 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
2432 	ret = dequeue_signal(tsk, mask, info);
2433 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2434 
2435 	return ret;
2436 }
2437 
2438 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2439 			      sigset_t *mask);
2440 extern void unblock_all_signals(void);
2441 extern void release_task(struct task_struct * p);
2442 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2443 extern int force_sigsegv(int, struct task_struct *);
2444 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2445 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2446 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2447 extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2448 				const struct cred *, u32);
2449 extern int kill_pgrp(struct pid *pid, int sig, int priv);
2450 extern int kill_pid(struct pid *pid, int sig, int priv);
2451 extern int kill_proc_info(int, struct siginfo *, pid_t);
2452 extern __must_check bool do_notify_parent(struct task_struct *, int);
2453 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2454 extern void force_sig(int, struct task_struct *);
2455 extern int send_sig(int, struct task_struct *, int);
2456 extern int zap_other_threads(struct task_struct *p);
2457 extern struct sigqueue *sigqueue_alloc(void);
2458 extern void sigqueue_free(struct sigqueue *);
2459 extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2460 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2461 
2462 static inline void restore_saved_sigmask(void)
2463 {
2464 	if (test_and_clear_restore_sigmask())
2465 		__set_current_blocked(&current->saved_sigmask);
2466 }
2467 
2468 static inline sigset_t *sigmask_to_save(void)
2469 {
2470 	sigset_t *res = &current->blocked;
2471 	if (unlikely(test_restore_sigmask()))
2472 		res = &current->saved_sigmask;
2473 	return res;
2474 }
2475 
2476 static inline int kill_cad_pid(int sig, int priv)
2477 {
2478 	return kill_pid(cad_pid, sig, priv);
2479 }
2480 
2481 /* These can be the second arg to send_sig_info/send_group_sig_info.  */
2482 #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2483 #define SEND_SIG_PRIV	((struct siginfo *) 1)
2484 #define SEND_SIG_FORCED	((struct siginfo *) 2)
2485 
2486 /*
2487  * True if we are on the alternate signal stack.
2488  */
2489 static inline int on_sig_stack(unsigned long sp)
2490 {
2491 #ifdef CONFIG_STACK_GROWSUP
2492 	return sp >= current->sas_ss_sp &&
2493 		sp - current->sas_ss_sp < current->sas_ss_size;
2494 #else
2495 	return sp > current->sas_ss_sp &&
2496 		sp - current->sas_ss_sp <= current->sas_ss_size;
2497 #endif
2498 }
2499 
2500 static inline int sas_ss_flags(unsigned long sp)
2501 {
2502 	if (!current->sas_ss_size)
2503 		return SS_DISABLE;
2504 
2505 	return on_sig_stack(sp) ? SS_ONSTACK : 0;
2506 }
2507 
2508 static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2509 {
2510 	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2511 #ifdef CONFIG_STACK_GROWSUP
2512 		return current->sas_ss_sp;
2513 #else
2514 		return current->sas_ss_sp + current->sas_ss_size;
2515 #endif
2516 	return sp;
2517 }
2518 
2519 /*
2520  * Routines for handling mm_structs
2521  */
2522 extern struct mm_struct * mm_alloc(void);
2523 
2524 /* mmdrop drops the mm and the page tables */
2525 extern void __mmdrop(struct mm_struct *);
2526 static inline void mmdrop(struct mm_struct * mm)
2527 {
2528 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2529 		__mmdrop(mm);
2530 }
2531 
2532 /* mmput gets rid of the mappings and all user-space */
2533 extern void mmput(struct mm_struct *);
2534 /* Grab a reference to a task's mm, if it is not already going away */
2535 extern struct mm_struct *get_task_mm(struct task_struct *task);
2536 /*
2537  * Grab a reference to a task's mm, if it is not already going away
2538  * and ptrace_may_access with the mode parameter passed to it
2539  * succeeds.
2540  */
2541 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2542 /* Remove the current tasks stale references to the old mm_struct */
2543 extern void mm_release(struct task_struct *, struct mm_struct *);
2544 
2545 #ifdef CONFIG_HAVE_COPY_THREAD_TLS
2546 extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
2547 			struct task_struct *, unsigned long);
2548 #else
2549 extern int copy_thread(unsigned long, unsigned long, unsigned long,
2550 			struct task_struct *);
2551 
2552 /* Architectures that haven't opted into copy_thread_tls get the tls argument
2553  * via pt_regs, so ignore the tls argument passed via C. */
2554 static inline int copy_thread_tls(
2555 		unsigned long clone_flags, unsigned long sp, unsigned long arg,
2556 		struct task_struct *p, unsigned long tls)
2557 {
2558 	return copy_thread(clone_flags, sp, arg, p);
2559 }
2560 #endif
2561 extern void flush_thread(void);
2562 extern void exit_thread(void);
2563 
2564 extern void exit_files(struct task_struct *);
2565 extern void __cleanup_sighand(struct sighand_struct *);
2566 
2567 extern void exit_itimers(struct signal_struct *);
2568 extern void flush_itimer_signals(void);
2569 
2570 extern void do_group_exit(int);
2571 
2572 extern int do_execve(struct filename *,
2573 		     const char __user * const __user *,
2574 		     const char __user * const __user *);
2575 extern int do_execveat(int, struct filename *,
2576 		       const char __user * const __user *,
2577 		       const char __user * const __user *,
2578 		       int);
2579 extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
2580 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2581 struct task_struct *fork_idle(int);
2582 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2583 
2584 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
2585 static inline void set_task_comm(struct task_struct *tsk, const char *from)
2586 {
2587 	__set_task_comm(tsk, from, false);
2588 }
2589 extern char *get_task_comm(char *to, struct task_struct *tsk);
2590 
2591 #ifdef CONFIG_SMP
2592 void scheduler_ipi(void);
2593 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2594 #else
2595 static inline void scheduler_ipi(void) { }
2596 static inline unsigned long wait_task_inactive(struct task_struct *p,
2597 					       long match_state)
2598 {
2599 	return 1;
2600 }
2601 #endif
2602 
2603 #define tasklist_empty() \
2604 	list_empty(&init_task.tasks)
2605 
2606 #define next_task(p) \
2607 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2608 
2609 #define for_each_process(p) \
2610 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2611 
2612 extern bool current_is_single_threaded(void);
2613 
2614 /*
2615  * Careful: do_each_thread/while_each_thread is a double loop so
2616  *          'break' will not work as expected - use goto instead.
2617  */
2618 #define do_each_thread(g, t) \
2619 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2620 
2621 #define while_each_thread(g, t) \
2622 	while ((t = next_thread(t)) != g)
2623 
2624 #define __for_each_thread(signal, t)	\
2625 	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2626 
2627 #define for_each_thread(p, t)		\
2628 	__for_each_thread((p)->signal, t)
2629 
2630 /* Careful: this is a double loop, 'break' won't work as expected. */
2631 #define for_each_process_thread(p, t)	\
2632 	for_each_process(p) for_each_thread(p, t)
2633 
2634 static inline int get_nr_threads(struct task_struct *tsk)
2635 {
2636 	return tsk->signal->nr_threads;
2637 }
2638 
2639 static inline bool thread_group_leader(struct task_struct *p)
2640 {
2641 	return p->exit_signal >= 0;
2642 }
2643 
2644 /* Do to the insanities of de_thread it is possible for a process
2645  * to have the pid of the thread group leader without actually being
2646  * the thread group leader.  For iteration through the pids in proc
2647  * all we care about is that we have a task with the appropriate
2648  * pid, we don't actually care if we have the right task.
2649  */
2650 static inline bool has_group_leader_pid(struct task_struct *p)
2651 {
2652 	return task_pid(p) == p->signal->leader_pid;
2653 }
2654 
2655 static inline
2656 bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2657 {
2658 	return p1->signal == p2->signal;
2659 }
2660 
2661 static inline struct task_struct *next_thread(const struct task_struct *p)
2662 {
2663 	return list_entry_rcu(p->thread_group.next,
2664 			      struct task_struct, thread_group);
2665 }
2666 
2667 static inline int thread_group_empty(struct task_struct *p)
2668 {
2669 	return list_empty(&p->thread_group);
2670 }
2671 
2672 #define delay_group_leader(p) \
2673 		(thread_group_leader(p) && !thread_group_empty(p))
2674 
2675 /*
2676  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2677  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2678  * pins the final release of task.io_context.  Also protects ->cpuset and
2679  * ->cgroup.subsys[]. And ->vfork_done.
2680  *
2681  * Nests both inside and outside of read_lock(&tasklist_lock).
2682  * It must not be nested with write_lock_irq(&tasklist_lock),
2683  * neither inside nor outside.
2684  */
2685 static inline void task_lock(struct task_struct *p)
2686 {
2687 	spin_lock(&p->alloc_lock);
2688 }
2689 
2690 static inline void task_unlock(struct task_struct *p)
2691 {
2692 	spin_unlock(&p->alloc_lock);
2693 }
2694 
2695 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2696 							unsigned long *flags);
2697 
2698 static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2699 						       unsigned long *flags)
2700 {
2701 	struct sighand_struct *ret;
2702 
2703 	ret = __lock_task_sighand(tsk, flags);
2704 	(void)__cond_lock(&tsk->sighand->siglock, ret);
2705 	return ret;
2706 }
2707 
2708 static inline void unlock_task_sighand(struct task_struct *tsk,
2709 						unsigned long *flags)
2710 {
2711 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2712 }
2713 
2714 /**
2715  * threadgroup_change_begin - mark the beginning of changes to a threadgroup
2716  * @tsk: task causing the changes
2717  *
2718  * All operations which modify a threadgroup - a new thread joining the
2719  * group, death of a member thread (the assertion of PF_EXITING) and
2720  * exec(2) dethreading the process and replacing the leader - are wrapped
2721  * by threadgroup_change_{begin|end}().  This is to provide a place which
2722  * subsystems needing threadgroup stability can hook into for
2723  * synchronization.
2724  */
2725 static inline void threadgroup_change_begin(struct task_struct *tsk)
2726 {
2727 	might_sleep();
2728 	cgroup_threadgroup_change_begin(tsk);
2729 }
2730 
2731 /**
2732  * threadgroup_change_end - mark the end of changes to a threadgroup
2733  * @tsk: task causing the changes
2734  *
2735  * See threadgroup_change_begin().
2736  */
2737 static inline void threadgroup_change_end(struct task_struct *tsk)
2738 {
2739 	cgroup_threadgroup_change_end(tsk);
2740 }
2741 
2742 #ifndef __HAVE_THREAD_FUNCTIONS
2743 
2744 #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2745 #define task_stack_page(task)	((task)->stack)
2746 
2747 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2748 {
2749 	*task_thread_info(p) = *task_thread_info(org);
2750 	task_thread_info(p)->task = p;
2751 }
2752 
2753 /*
2754  * Return the address of the last usable long on the stack.
2755  *
2756  * When the stack grows down, this is just above the thread
2757  * info struct. Going any lower will corrupt the threadinfo.
2758  *
2759  * When the stack grows up, this is the highest address.
2760  * Beyond that position, we corrupt data on the next page.
2761  */
2762 static inline unsigned long *end_of_stack(struct task_struct *p)
2763 {
2764 #ifdef CONFIG_STACK_GROWSUP
2765 	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
2766 #else
2767 	return (unsigned long *)(task_thread_info(p) + 1);
2768 #endif
2769 }
2770 
2771 #endif
2772 #define task_stack_end_corrupted(task) \
2773 		(*(end_of_stack(task)) != STACK_END_MAGIC)
2774 
2775 static inline int object_is_on_stack(void *obj)
2776 {
2777 	void *stack = task_stack_page(current);
2778 
2779 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2780 }
2781 
2782 extern void thread_info_cache_init(void);
2783 
2784 #ifdef CONFIG_DEBUG_STACK_USAGE
2785 static inline unsigned long stack_not_used(struct task_struct *p)
2786 {
2787 	unsigned long *n = end_of_stack(p);
2788 
2789 	do { 	/* Skip over canary */
2790 		n++;
2791 	} while (!*n);
2792 
2793 	return (unsigned long)n - (unsigned long)end_of_stack(p);
2794 }
2795 #endif
2796 extern void set_task_stack_end_magic(struct task_struct *tsk);
2797 
2798 /* set thread flags in other task's structures
2799  * - see asm/thread_info.h for TIF_xxxx flags available
2800  */
2801 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2802 {
2803 	set_ti_thread_flag(task_thread_info(tsk), flag);
2804 }
2805 
2806 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2807 {
2808 	clear_ti_thread_flag(task_thread_info(tsk), flag);
2809 }
2810 
2811 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2812 {
2813 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2814 }
2815 
2816 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2817 {
2818 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2819 }
2820 
2821 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2822 {
2823 	return test_ti_thread_flag(task_thread_info(tsk), flag);
2824 }
2825 
2826 static inline void set_tsk_need_resched(struct task_struct *tsk)
2827 {
2828 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2829 }
2830 
2831 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2832 {
2833 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2834 }
2835 
2836 static inline int test_tsk_need_resched(struct task_struct *tsk)
2837 {
2838 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2839 }
2840 
2841 static inline int restart_syscall(void)
2842 {
2843 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2844 	return -ERESTARTNOINTR;
2845 }
2846 
2847 static inline int signal_pending(struct task_struct *p)
2848 {
2849 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2850 }
2851 
2852 static inline int __fatal_signal_pending(struct task_struct *p)
2853 {
2854 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2855 }
2856 
2857 static inline int fatal_signal_pending(struct task_struct *p)
2858 {
2859 	return signal_pending(p) && __fatal_signal_pending(p);
2860 }
2861 
2862 static inline int signal_pending_state(long state, struct task_struct *p)
2863 {
2864 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2865 		return 0;
2866 	if (!signal_pending(p))
2867 		return 0;
2868 
2869 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2870 }
2871 
2872 /*
2873  * cond_resched() and cond_resched_lock(): latency reduction via
2874  * explicit rescheduling in places that are safe. The return
2875  * value indicates whether a reschedule was done in fact.
2876  * cond_resched_lock() will drop the spinlock before scheduling,
2877  * cond_resched_softirq() will enable bhs before scheduling.
2878  */
2879 extern int _cond_resched(void);
2880 
2881 #define cond_resched() ({			\
2882 	___might_sleep(__FILE__, __LINE__, 0);	\
2883 	_cond_resched();			\
2884 })
2885 
2886 extern int __cond_resched_lock(spinlock_t *lock);
2887 
2888 #ifdef CONFIG_PREEMPT_COUNT
2889 #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
2890 #else
2891 #define PREEMPT_LOCK_OFFSET	0
2892 #endif
2893 
2894 #define cond_resched_lock(lock) ({				\
2895 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
2896 	__cond_resched_lock(lock);				\
2897 })
2898 
2899 extern int __cond_resched_softirq(void);
2900 
2901 #define cond_resched_softirq() ({					\
2902 	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2903 	__cond_resched_softirq();					\
2904 })
2905 
2906 static inline void cond_resched_rcu(void)
2907 {
2908 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2909 	rcu_read_unlock();
2910 	cond_resched();
2911 	rcu_read_lock();
2912 #endif
2913 }
2914 
2915 /*
2916  * Does a critical section need to be broken due to another
2917  * task waiting?: (technically does not depend on CONFIG_PREEMPT,
2918  * but a general need for low latency)
2919  */
2920 static inline int spin_needbreak(spinlock_t *lock)
2921 {
2922 #ifdef CONFIG_PREEMPT
2923 	return spin_is_contended(lock);
2924 #else
2925 	return 0;
2926 #endif
2927 }
2928 
2929 /*
2930  * Idle thread specific functions to determine the need_resched
2931  * polling state.
2932  */
2933 #ifdef TIF_POLLING_NRFLAG
2934 static inline int tsk_is_polling(struct task_struct *p)
2935 {
2936 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2937 }
2938 
2939 static inline void __current_set_polling(void)
2940 {
2941 	set_thread_flag(TIF_POLLING_NRFLAG);
2942 }
2943 
2944 static inline bool __must_check current_set_polling_and_test(void)
2945 {
2946 	__current_set_polling();
2947 
2948 	/*
2949 	 * Polling state must be visible before we test NEED_RESCHED,
2950 	 * paired by resched_curr()
2951 	 */
2952 	smp_mb__after_atomic();
2953 
2954 	return unlikely(tif_need_resched());
2955 }
2956 
2957 static inline void __current_clr_polling(void)
2958 {
2959 	clear_thread_flag(TIF_POLLING_NRFLAG);
2960 }
2961 
2962 static inline bool __must_check current_clr_polling_and_test(void)
2963 {
2964 	__current_clr_polling();
2965 
2966 	/*
2967 	 * Polling state must be visible before we test NEED_RESCHED,
2968 	 * paired by resched_curr()
2969 	 */
2970 	smp_mb__after_atomic();
2971 
2972 	return unlikely(tif_need_resched());
2973 }
2974 
2975 #else
2976 static inline int tsk_is_polling(struct task_struct *p) { return 0; }
2977 static inline void __current_set_polling(void) { }
2978 static inline void __current_clr_polling(void) { }
2979 
2980 static inline bool __must_check current_set_polling_and_test(void)
2981 {
2982 	return unlikely(tif_need_resched());
2983 }
2984 static inline bool __must_check current_clr_polling_and_test(void)
2985 {
2986 	return unlikely(tif_need_resched());
2987 }
2988 #endif
2989 
2990 static inline void current_clr_polling(void)
2991 {
2992 	__current_clr_polling();
2993 
2994 	/*
2995 	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
2996 	 * Once the bit is cleared, we'll get IPIs with every new
2997 	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
2998 	 * fold.
2999 	 */
3000 	smp_mb(); /* paired with resched_curr() */
3001 
3002 	preempt_fold_need_resched();
3003 }
3004 
3005 static __always_inline bool need_resched(void)
3006 {
3007 	return unlikely(tif_need_resched());
3008 }
3009 
3010 /*
3011  * Thread group CPU time accounting.
3012  */
3013 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
3014 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
3015 
3016 /*
3017  * Reevaluate whether the task has signals pending delivery.
3018  * Wake the task if so.
3019  * This is required every time the blocked sigset_t changes.
3020  * callers must hold sighand->siglock.
3021  */
3022 extern void recalc_sigpending_and_wake(struct task_struct *t);
3023 extern void recalc_sigpending(void);
3024 
3025 extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
3026 
3027 static inline void signal_wake_up(struct task_struct *t, bool resume)
3028 {
3029 	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
3030 }
3031 static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
3032 {
3033 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
3034 }
3035 
3036 /*
3037  * Wrappers for p->thread_info->cpu access. No-op on UP.
3038  */
3039 #ifdef CONFIG_SMP
3040 
3041 static inline unsigned int task_cpu(const struct task_struct *p)
3042 {
3043 	return task_thread_info(p)->cpu;
3044 }
3045 
3046 static inline int task_node(const struct task_struct *p)
3047 {
3048 	return cpu_to_node(task_cpu(p));
3049 }
3050 
3051 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
3052 
3053 #else
3054 
3055 static inline unsigned int task_cpu(const struct task_struct *p)
3056 {
3057 	return 0;
3058 }
3059 
3060 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
3061 {
3062 }
3063 
3064 #endif /* CONFIG_SMP */
3065 
3066 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
3067 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
3068 
3069 #ifdef CONFIG_CGROUP_SCHED
3070 extern struct task_group root_task_group;
3071 #endif /* CONFIG_CGROUP_SCHED */
3072 
3073 extern int task_can_switch_user(struct user_struct *up,
3074 					struct task_struct *tsk);
3075 
3076 #ifdef CONFIG_TASK_XACCT
3077 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3078 {
3079 	tsk->ioac.rchar += amt;
3080 }
3081 
3082 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3083 {
3084 	tsk->ioac.wchar += amt;
3085 }
3086 
3087 static inline void inc_syscr(struct task_struct *tsk)
3088 {
3089 	tsk->ioac.syscr++;
3090 }
3091 
3092 static inline void inc_syscw(struct task_struct *tsk)
3093 {
3094 	tsk->ioac.syscw++;
3095 }
3096 #else
3097 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3098 {
3099 }
3100 
3101 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3102 {
3103 }
3104 
3105 static inline void inc_syscr(struct task_struct *tsk)
3106 {
3107 }
3108 
3109 static inline void inc_syscw(struct task_struct *tsk)
3110 {
3111 }
3112 #endif
3113 
3114 #ifndef TASK_SIZE_OF
3115 #define TASK_SIZE_OF(tsk)	TASK_SIZE
3116 #endif
3117 
3118 #ifdef CONFIG_MEMCG
3119 extern void mm_update_next_owner(struct mm_struct *mm);
3120 #else
3121 static inline void mm_update_next_owner(struct mm_struct *mm)
3122 {
3123 }
3124 #endif /* CONFIG_MEMCG */
3125 
3126 static inline unsigned long task_rlimit(const struct task_struct *tsk,
3127 		unsigned int limit)
3128 {
3129 	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
3130 }
3131 
3132 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
3133 		unsigned int limit)
3134 {
3135 	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
3136 }
3137 
3138 static inline unsigned long rlimit(unsigned int limit)
3139 {
3140 	return task_rlimit(current, limit);
3141 }
3142 
3143 static inline unsigned long rlimit_max(unsigned int limit)
3144 {
3145 	return task_rlimit_max(current, limit);
3146 }
3147 
3148 #endif
3149