xref: /linux/include/linux/sched.h (revision 6701212e86398622a05b85d87391646bf0e81bfc)
1 #ifndef _LINUX_SCHED_H
2 #define _LINUX_SCHED_H
3 
4 #include <uapi/linux/sched.h>
5 
6 #include <linux/sched/prio.h>
7 
8 
9 struct sched_param {
10 	int sched_priority;
11 };
12 
13 #include <asm/param.h>	/* for HZ */
14 
15 #include <linux/capability.h>
16 #include <linux/threads.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/timex.h>
20 #include <linux/jiffies.h>
21 #include <linux/plist.h>
22 #include <linux/rbtree.h>
23 #include <linux/thread_info.h>
24 #include <linux/cpumask.h>
25 #include <linux/errno.h>
26 #include <linux/nodemask.h>
27 #include <linux/mm_types.h>
28 #include <linux/preempt.h>
29 
30 #include <asm/page.h>
31 #include <asm/ptrace.h>
32 #include <linux/cputime.h>
33 
34 #include <linux/smp.h>
35 #include <linux/sem.h>
36 #include <linux/shm.h>
37 #include <linux/signal.h>
38 #include <linux/compiler.h>
39 #include <linux/completion.h>
40 #include <linux/pid.h>
41 #include <linux/percpu.h>
42 #include <linux/topology.h>
43 #include <linux/proportions.h>
44 #include <linux/seccomp.h>
45 #include <linux/rcupdate.h>
46 #include <linux/rculist.h>
47 #include <linux/rtmutex.h>
48 
49 #include <linux/time.h>
50 #include <linux/param.h>
51 #include <linux/resource.h>
52 #include <linux/timer.h>
53 #include <linux/hrtimer.h>
54 #include <linux/task_io_accounting.h>
55 #include <linux/latencytop.h>
56 #include <linux/cred.h>
57 #include <linux/llist.h>
58 #include <linux/uidgid.h>
59 #include <linux/gfp.h>
60 #include <linux/magic.h>
61 #include <linux/cgroup-defs.h>
62 
63 #include <asm/processor.h>
64 
65 #define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */
66 
67 /*
68  * Extended scheduling parameters data structure.
69  *
70  * This is needed because the original struct sched_param can not be
71  * altered without introducing ABI issues with legacy applications
72  * (e.g., in sched_getparam()).
73  *
74  * However, the possibility of specifying more than just a priority for
75  * the tasks may be useful for a wide variety of application fields, e.g.,
76  * multimedia, streaming, automation and control, and many others.
77  *
78  * This variant (sched_attr) is meant to describe a so-called
79  * sporadic time-constrained task. In such a model, a task is specified by:
80  *  - the activation period or minimum instance inter-arrival time;
81  *  - the maximum (or average, depending on the actual scheduling
82  *    discipline) computation time of all instances, a.k.a. runtime;
83  *  - the deadline (relative to the actual activation time) of each
84  *    instance.
85  * Very briefly, a periodic (sporadic) task asks for the execution of
86  * some specific computation --which is typically called an instance--
87  * (at most) every period. Moreover, each instance typically lasts no more
88  * than the runtime and must be completed by time instant t equal to
89  * the instance activation time + the deadline.
90  *
91  * This is reflected by the actual fields of the sched_attr structure:
92  *
93  *  @size		size of the structure, for fwd/bwd compat.
94  *
95  *  @sched_policy	task's scheduling policy
96  *  @sched_flags	for customizing the scheduler behaviour
97  *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
98  *  @sched_priority	task's static priority (SCHED_FIFO/RR)
99  *  @sched_deadline	representative of the task's deadline
100  *  @sched_runtime	representative of the task's runtime
101  *  @sched_period	representative of the task's period
102  *
103  * Given this task model, there is a multiplicity of scheduling algorithms
104  * and policies that can be used to ensure all the tasks will meet their
105  * timing constraints.
106  *
107  * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
108  * only user of this new interface. More information about the algorithm is
109  * available in the scheduling class file or in Documentation/.
110  */
111 struct sched_attr {
112 	u32 size;
113 
114 	u32 sched_policy;
115 	u64 sched_flags;
116 
117 	/* SCHED_NORMAL, SCHED_BATCH */
118 	s32 sched_nice;
119 
120 	/* SCHED_FIFO, SCHED_RR */
121 	u32 sched_priority;
122 
123 	/* SCHED_DEADLINE */
124 	u64 sched_runtime;
125 	u64 sched_deadline;
126 	u64 sched_period;
127 };
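/*
 * Illustrative user-space sketch (not part of this header): selecting the
 * SCHED_DEADLINE policy via sched_setattr(2). Many C libraries provide no
 * wrapper, so the layout above is duplicated locally and the syscall is
 * invoked directly. The helper name, the local struct name and the
 * availability of __NR_sched_setattr are assumptions of this example.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/sched.h>		// SCHED_DEADLINE
 *
 *	struct sched_attr_user {		// mirrors struct sched_attr above
 *		uint32_t size;
 *		uint32_t sched_policy;
 *		uint64_t sched_flags;
 *		int32_t  sched_nice;
 *		uint32_t sched_priority;
 *		uint64_t sched_runtime;
 *		uint64_t sched_deadline;
 *		uint64_t sched_period;
 *	};
 *
 *	static int set_deadline(pid_t pid, uint64_t runtime_ns,
 *				uint64_t deadline_ns, uint64_t period_ns)
 *	{
 *		struct sched_attr_user attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.size = sizeof(attr);		// 48 == SCHED_ATTR_SIZE_VER0
 *		attr.sched_policy = SCHED_DEADLINE;
 *		attr.sched_runtime = runtime_ns;	// worst-case runtime per instance
 *		attr.sched_deadline = deadline_ns;	// relative deadline
 *		attr.sched_period = period_ns;		// minimum inter-arrival time
 *
 *		return syscall(__NR_sched_setattr, pid, &attr, 0);
 *	}
 */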
128 
129 struct futex_pi_state;
130 struct robust_list_head;
131 struct bio_list;
132 struct fs_struct;
133 struct perf_event_context;
134 struct blk_plug;
135 struct filename;
136 struct nameidata;
137 
138 #define VMACACHE_BITS 2
139 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
140 #define VMACACHE_MASK (VMACACHE_SIZE - 1)
141 
142 /*
143  * These are the constants used to fake the fixed-point load-average
144  * counting. Some notes:
145  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
146  *    a load-average precision of 10 bits integer + 11 bits fractional
147  *  - if you want to count load-averages more often, you need more
148  *    precision, or rounding will get you. With 2-second counting freq,
149  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
150  *    11 bit fractions.
151  */
152 extern unsigned long avenrun[];		/* Load averages */
153 extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
154 
155 #define FSHIFT		11		/* nr of bits of precision */
156 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
157 #define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
158 #define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
159 #define EXP_5		2014		/* 1/exp(5sec/5min) */
160 #define EXP_15		2037		/* 1/exp(5sec/15min) */
161 
162 #define CALC_LOAD(load,exp,n) \
163 	load *= exp; \
164 	load += n*(FIXED_1-exp); \
165 	load >>= FSHIFT;
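/*
 * Worked example (illustrative numbers only): with FIXED_1 = 2048 and
 * EXP_1 = 1884, one LOAD_FREQ update of the 1-minute average, starting
 * from a load of 1.00 (2048 fixed-point) with 3 runnable tasks
 * (n = 3 * FIXED_1 = 6144), gives
 *
 *	load = (2048*1884 + 6144*(2048 - 1884)) >> 11
 *	     = (3858432 + 1007616) >> 11
 *	     = 2376			(~1.16 as a decimal)
 *
 * i.e. the average moves towards the instantaneous task count, weighted
 * by (FIXED_1 - EXP_1)/FIXED_1 per interval.
 */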
166 
167 extern unsigned long total_forks;
168 extern int nr_threads;
169 DECLARE_PER_CPU(unsigned long, process_counts);
170 extern int nr_processes(void);
171 extern unsigned long nr_running(void);
172 extern bool single_task_running(void);
173 extern unsigned long nr_iowait(void);
174 extern unsigned long nr_iowait_cpu(int cpu);
175 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
176 
177 extern void calc_global_load(unsigned long ticks);
178 
179 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
180 extern void update_cpu_load_nohz(void);
181 #else
182 static inline void update_cpu_load_nohz(void) { }
183 #endif
184 
185 extern unsigned long get_parent_ip(unsigned long addr);
186 
187 extern void dump_cpu_task(int cpu);
188 
189 struct seq_file;
190 struct cfs_rq;
191 struct task_group;
192 #ifdef CONFIG_SCHED_DEBUG
193 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
194 extern void proc_sched_set_task(struct task_struct *p);
195 #endif
196 
197 /*
198  * Task state bitmask. NOTE! These bits are also
199  * encoded in fs/proc/array.c: get_task_state().
200  *
201  * We have two separate sets of flags: task->state
202  * is about runnability, while task->exit_state is
203  * about the task exiting. Confusing, but this way
204  * modifying one set can't modify the other one by
205  * mistake.
206  */
207 #define TASK_RUNNING		0
208 #define TASK_INTERRUPTIBLE	1
209 #define TASK_UNINTERRUPTIBLE	2
210 #define __TASK_STOPPED		4
211 #define __TASK_TRACED		8
212 /* in tsk->exit_state */
213 #define EXIT_DEAD		16
214 #define EXIT_ZOMBIE		32
215 #define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
216 /* in tsk->state again */
217 #define TASK_DEAD		64
218 #define TASK_WAKEKILL		128
219 #define TASK_WAKING		256
220 #define TASK_PARKED		512
221 #define TASK_NOLOAD		1024
222 #define TASK_STATE_MAX		2048
223 
224 #define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
225 
226 extern char ___assert_task_state[1 - 2*!!(
227 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
228 
229 /* Convenience macros for the sake of set_task_state */
230 #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
231 #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
232 #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
233 
234 #define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
235 
236 /* Convenience macros for the sake of wake_up */
237 #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
238 #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
239 
240 /* get_task_state() */
241 #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
242 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
243 				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
244 
245 #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
246 #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
247 #define task_is_stopped_or_traced(task)	\
248 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
249 #define task_contributes_to_load(task)	\
250 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
251 				 (task->flags & PF_FROZEN) == 0 && \
252 				 (task->state & TASK_NOLOAD) == 0)
253 
254 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
255 
256 #define __set_task_state(tsk, state_value)			\
257 	do {							\
258 		(tsk)->task_state_change = _THIS_IP_;		\
259 		(tsk)->state = (state_value);			\
260 	} while (0)
261 #define set_task_state(tsk, state_value)			\
262 	do {							\
263 		(tsk)->task_state_change = _THIS_IP_;		\
264 		smp_store_mb((tsk)->state, (state_value));		\
265 	} while (0)
266 
267 /*
268  * set_current_state() includes a barrier so that the write of current->state
269  * is correctly serialised wrt the caller's subsequent test of whether to
270  * actually sleep:
271  *
272  *	set_current_state(TASK_UNINTERRUPTIBLE);
273  *	if (do_i_need_to_sleep())
274  *		schedule();
275  *
276  * If the caller does not need such serialisation then use __set_current_state()
277  */
278 #define __set_current_state(state_value)			\
279 	do {							\
280 		current->task_state_change = _THIS_IP_;		\
281 		current->state = (state_value);			\
282 	} while (0)
283 #define set_current_state(state_value)				\
284 	do {							\
285 		current->task_state_change = _THIS_IP_;		\
286 		smp_store_mb(current->state, (state_value));		\
287 	} while (0)
288 
289 #else
290 
291 #define __set_task_state(tsk, state_value)		\
292 	do { (tsk)->state = (state_value); } while (0)
293 #define set_task_state(tsk, state_value)		\
294 	smp_store_mb((tsk)->state, (state_value))
295 
296 /*
297  * set_current_state() includes a barrier so that the write of current->state
298  * is correctly serialised wrt the caller's subsequent test of whether to
299  * actually sleep:
300  *
301  *	set_current_state(TASK_UNINTERRUPTIBLE);
302  *	if (do_i_need_to_sleep())
303  *		schedule();
304  *
305  * If the caller does not need such serialisation then use __set_current_state()
306  */
307 #define __set_current_state(state_value)		\
308 	do { current->state = (state_value); } while (0)
309 #define set_current_state(state_value)			\
310 	smp_store_mb(current->state, (state_value))
311 
312 #endif
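/*
 * Illustrative sketch of the canonical wait loop these helpers are meant
 * for; 'condition' and the waker's matching wake_up_process() call are
 * assumed to exist elsewhere:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */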
313 
314 /* Task command name length */
315 #define TASK_COMM_LEN 16
316 
317 #include <linux/spinlock.h>
318 
319 /*
320  * This serializes "schedule()" and also protects
321  * the run-queue from deletions/modifications (but
322  * _adding_ to the beginning of the run-queue has
323  * a separate lock).
324  */
325 extern rwlock_t tasklist_lock;
326 extern spinlock_t mmlist_lock;
327 
328 struct task_struct;
329 
330 #ifdef CONFIG_PROVE_RCU
331 extern int lockdep_tasklist_lock_is_held(void);
332 #endif /* #ifdef CONFIG_PROVE_RCU */
333 
334 extern void sched_init(void);
335 extern void sched_init_smp(void);
336 extern asmlinkage void schedule_tail(struct task_struct *prev);
337 extern void init_idle(struct task_struct *idle, int cpu);
338 extern void init_idle_bootup_task(struct task_struct *idle);
339 
340 extern cpumask_var_t cpu_isolated_map;
341 
342 extern int runqueue_is_locked(int cpu);
343 
344 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
345 extern void nohz_balance_enter_idle(int cpu);
346 extern void set_cpu_sd_state_idle(void);
347 extern int get_nohz_timer_target(void);
348 #else
349 static inline void nohz_balance_enter_idle(int cpu) { }
350 static inline void set_cpu_sd_state_idle(void) { }
351 #endif
352 
353 /*
354  * Only dump TASK_* tasks. (0 for all tasks)
355  */
356 extern void show_state_filter(unsigned long state_filter);
357 
358 static inline void show_state(void)
359 {
360 	show_state_filter(0);
361 }
362 
363 extern void show_regs(struct pt_regs *);
364 
365 /*
366  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
367  * task), SP is the stack pointer of the first frame that should be shown in the
368  * backtrace (or NULL if the entire call-chain of the task should be shown).
369  */
370 extern void show_stack(struct task_struct *task, unsigned long *sp);
371 
372 extern void cpu_init (void);
373 extern void trap_init(void);
374 extern void update_process_times(int user);
375 extern void scheduler_tick(void);
376 
377 extern void sched_show_task(struct task_struct *p);
378 
379 #ifdef CONFIG_LOCKUP_DETECTOR
380 extern void touch_softlockup_watchdog(void);
381 extern void touch_softlockup_watchdog_sync(void);
382 extern void touch_all_softlockup_watchdogs(void);
383 extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
384 				  void __user *buffer,
385 				  size_t *lenp, loff_t *ppos);
386 extern unsigned int  softlockup_panic;
387 void lockup_detector_init(void);
388 #else
389 static inline void touch_softlockup_watchdog(void)
390 {
391 }
392 static inline void touch_softlockup_watchdog_sync(void)
393 {
394 }
395 static inline void touch_all_softlockup_watchdogs(void)
396 {
397 }
398 static inline void lockup_detector_init(void)
399 {
400 }
401 #endif
402 
403 #ifdef CONFIG_DETECT_HUNG_TASK
404 void reset_hung_task_detector(void);
405 #else
406 static inline void reset_hung_task_detector(void)
407 {
408 }
409 #endif
410 
411 /* Attach to any functions which should be ignored in wchan output. */
412 #define __sched		__attribute__((__section__(".sched.text")))
413 
414 /* Linker adds these: start and end of __sched functions */
415 extern char __sched_text_start[], __sched_text_end[];
416 
417 /* Is this address in the __sched functions? */
418 extern int in_sched_functions(unsigned long addr);
419 
420 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
421 extern signed long schedule_timeout(signed long timeout);
422 extern signed long schedule_timeout_interruptible(signed long timeout);
423 extern signed long schedule_timeout_killable(signed long timeout);
424 extern signed long schedule_timeout_uninterruptible(signed long timeout);
425 asmlinkage void schedule(void);
426 extern void schedule_preempt_disabled(void);
427 
428 extern long io_schedule_timeout(long timeout);
429 
430 static inline void io_schedule(void)
431 {
432 	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
433 }
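/*
 * Illustrative sketch of a timed wait built on the declarations above: the
 * caller must set the task state first, otherwise schedule_timeout() returns
 * without sleeping. msecs_to_jiffies() comes from <linux/jiffies.h>, which is
 * already included by this header.
 *
 *	long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *	if (remaining == 0) {
 *		// the full timeout elapsed
 *	} else {
 *		// woken up (or signalled) early, 'remaining' jiffies were left
 *	}
 */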
434 
435 struct nsproxy;
436 struct user_namespace;
437 
438 #ifdef CONFIG_MMU
439 extern void arch_pick_mmap_layout(struct mm_struct *mm);
440 extern unsigned long
441 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
442 		       unsigned long, unsigned long);
443 extern unsigned long
444 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
445 			  unsigned long len, unsigned long pgoff,
446 			  unsigned long flags);
447 #else
448 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
449 #endif
450 
451 #define SUID_DUMP_DISABLE	0	/* No setuid dumping */
452 #define SUID_DUMP_USER		1	/* Dump as user of process */
453 #define SUID_DUMP_ROOT		2	/* Dump as root */
454 
455 /* mm flags */
456 
457 /* for SUID_DUMP_* above */
458 #define MMF_DUMPABLE_BITS 2
459 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
460 
461 extern void set_dumpable(struct mm_struct *mm, int value);
462 /*
463  * This returns the actual value of the suid_dumpable flag. For things
464  * that are using this for checking for privilege transitions, it must
465  * test against SUID_DUMP_USER rather than treating it as a boolean
466  * value.
467  */
468 static inline int __get_dumpable(unsigned long mm_flags)
469 {
470 	return mm_flags & MMF_DUMPABLE_MASK;
471 }
472 
473 static inline int get_dumpable(struct mm_struct *mm)
474 {
475 	return __get_dumpable(mm->flags);
476 }
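/*
 * Illustrative sketch of the distinction described above; the two handler
 * names are hypothetical:
 *
 *	// correct: explicit comparison, SUID_DUMP_ROOT is not "user dumpable"
 *	if (get_dumpable(mm) != SUID_DUMP_USER)
 *		handle_privilege_transition();
 *
 *	// wrong: a plain boolean test also accepts SUID_DUMP_ROOT (== 2)
 *	if (get_dumpable(mm))
 *		treat_as_user_dumpable();
 */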
477 
478 /* coredump filter bits */
479 #define MMF_DUMP_ANON_PRIVATE	2
480 #define MMF_DUMP_ANON_SHARED	3
481 #define MMF_DUMP_MAPPED_PRIVATE	4
482 #define MMF_DUMP_MAPPED_SHARED	5
483 #define MMF_DUMP_ELF_HEADERS	6
484 #define MMF_DUMP_HUGETLB_PRIVATE 7
485 #define MMF_DUMP_HUGETLB_SHARED  8
486 
487 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
488 #define MMF_DUMP_FILTER_BITS	7
489 #define MMF_DUMP_FILTER_MASK \
490 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
491 #define MMF_DUMP_FILTER_DEFAULT \
492 	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
493 	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
494 
495 #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
496 # define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
497 #else
498 # define MMF_DUMP_MASK_DEFAULT_ELF	0
499 #endif
500 					/* leave room for more dump flags */
501 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
502 #define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
503 #define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */
504 
505 #define MMF_HAS_UPROBES		19	/* has uprobes */
506 #define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
507 
508 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
509 
510 struct sighand_struct {
511 	atomic_t		count;
512 	struct k_sigaction	action[_NSIG];
513 	spinlock_t		siglock;
514 	wait_queue_head_t	signalfd_wqh;
515 };
516 
517 struct pacct_struct {
518 	int			ac_flag;
519 	long			ac_exitcode;
520 	unsigned long		ac_mem;
521 	cputime_t		ac_utime, ac_stime;
522 	unsigned long		ac_minflt, ac_majflt;
523 };
524 
525 struct cpu_itimer {
526 	cputime_t expires;
527 	cputime_t incr;
528 	u32 error;
529 	u32 incr_error;
530 };
531 
532 /**
533  * struct prev_cputime - snapshot of system and user cputime
534  * @utime: time spent in user mode
535  * @stime: time spent in system mode
536  * @lock: protects the above two fields
537  *
538  * Stores previous user/system time values such that we can guarantee
539  * monotonicity.
540  */
541 struct prev_cputime {
542 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
543 	cputime_t utime;
544 	cputime_t stime;
545 	raw_spinlock_t lock;
546 #endif
547 };
548 
549 static inline void prev_cputime_init(struct prev_cputime *prev)
550 {
551 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
552 	prev->utime = prev->stime = 0;
553 	raw_spin_lock_init(&prev->lock);
554 #endif
555 }
556 
557 /**
558  * struct task_cputime - collected CPU time counts
559  * @utime:		time spent in user mode, in &cputime_t units
560  * @stime:		time spent in kernel mode, in &cputime_t units
561  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
562  *
563  * This structure groups together three kinds of CPU time that are tracked for
564  * threads and thread groups.  Most things considering CPU time want to group
565  * these counts together and treat all three of them in parallel.
566  */
567 struct task_cputime {
568 	cputime_t utime;
569 	cputime_t stime;
570 	unsigned long long sum_exec_runtime;
571 };
572 
573 /* Alternate field names when used to cache expirations. */
574 #define virt_exp	utime
575 #define prof_exp	stime
576 #define sched_exp	sum_exec_runtime
577 
578 #define INIT_CPUTIME	\
579 	(struct task_cputime) {					\
580 		.utime = 0,					\
581 		.stime = 0,					\
582 		.sum_exec_runtime = 0,				\
583 	}
584 
585 /*
586  * This is the atomic variant of task_cputime, which can be used for
587  * storing and updating task_cputime statistics without locking.
588  */
589 struct task_cputime_atomic {
590 	atomic64_t utime;
591 	atomic64_t stime;
592 	atomic64_t sum_exec_runtime;
593 };
594 
595 #define INIT_CPUTIME_ATOMIC \
596 	(struct task_cputime_atomic) {				\
597 		.utime = ATOMIC64_INIT(0),			\
598 		.stime = ATOMIC64_INIT(0),			\
599 		.sum_exec_runtime = ATOMIC64_INIT(0),		\
600 	}
601 
602 #ifdef CONFIG_PREEMPT_COUNT
603 #define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
604 #else
605 #define PREEMPT_DISABLED	PREEMPT_ENABLED
606 #endif
607 
608 /*
609  * Disable preemption until the scheduler is running.
610  * Reset by start_kernel()->sched_init()->init_idle().
611  *
612  * We include PREEMPT_ACTIVE to prevent cond_resched() from working
613  * before the scheduler is active -- see should_resched().
614  */
615 #define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)
616 
617 /**
618  * struct thread_group_cputimer - thread group interval timer counts
619  * @cputime_atomic:	atomic thread group interval timers.
620  * @running:		non-zero when there are timers running and
621  * 			@cputime_atomic receives updates.
622  *
623  * This structure contains the version of task_cputime, above, that is
624  * used for thread group CPU timer calculations.
625  */
626 struct thread_group_cputimer {
627 	struct task_cputime_atomic cputime_atomic;
628 	int running;
629 };
630 
631 #include <linux/rwsem.h>
632 struct autogroup;
633 
634 /*
635  * NOTE! "signal_struct" does not have its own
636  * locking, because a shared signal_struct always
637  * implies a shared sighand_struct, so locking
638  * sighand_struct is always a proper superset of
639  * the locking of signal_struct.
640  */
641 struct signal_struct {
642 	atomic_t		sigcnt;
643 	atomic_t		live;
644 	int			nr_threads;
645 	struct list_head	thread_head;
646 
647 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
648 
649 	/* current thread group signal load-balancing target: */
650 	struct task_struct	*curr_target;
651 
652 	/* shared signal handling: */
653 	struct sigpending	shared_pending;
654 
655 	/* thread group exit support */
656 	int			group_exit_code;
657 	/* overloaded:
658 	 * - notify group_exit_task when ->count is equal to notify_count
659 	 * - everyone except group_exit_task is stopped during signal delivery
660 	 *   of fatal signals, group_exit_task processes the signal.
661 	 */
662 	int			notify_count;
663 	struct task_struct	*group_exit_task;
664 
665 	/* thread group stop support, overloads group_exit_code too */
666 	int			group_stop_count;
667 	unsigned int		flags; /* see SIGNAL_* flags below */
668 
669 	/*
670 	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
671 	 * manager, to re-parent orphan (double-forking) child processes
672 	 * to this process instead of 'init'. The service manager is
673 	 * able to receive SIGCHLD signals and is able to investigate
674 	 * the process until it calls wait(). All children of this
675 	 * process will inherit a flag if they should look for a
676 	 * child_subreaper process at exit.
677 	 */
678 	unsigned int		is_child_subreaper:1;
679 	unsigned int		has_child_subreaper:1;
680 
681 	/* POSIX.1b Interval Timers */
682 	int			posix_timer_id;
683 	struct list_head	posix_timers;
684 
685 	/* ITIMER_REAL timer for the process */
686 	struct hrtimer real_timer;
687 	struct pid *leader_pid;
688 	ktime_t it_real_incr;
689 
690 	/*
691 	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
692 	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
693 	 * values are defined to 0 and 1 respectively
694 	 */
695 	struct cpu_itimer it[2];
696 
697 	/*
698 	 * Thread group totals for process CPU timers.
699 	 * See thread_group_cputimer(), et al, for details.
700 	 */
701 	struct thread_group_cputimer cputimer;
702 
703 	/* Earliest-expiration cache. */
704 	struct task_cputime cputime_expires;
705 
706 	struct list_head cpu_timers[3];
707 
708 	struct pid *tty_old_pgrp;
709 
710 	/* boolean value for session group leader */
711 	int leader;
712 
713 	struct tty_struct *tty; /* NULL if no tty */
714 
715 #ifdef CONFIG_SCHED_AUTOGROUP
716 	struct autogroup *autogroup;
717 #endif
718 	/*
719 	 * Cumulative resource counters for dead threads in the group,
720 	 * and for reaped dead child processes forked by this group.
721 	 * Live threads maintain their own counters and add to these
722 	 * in __exit_signal, except for the group leader.
723 	 */
724 	seqlock_t stats_lock;
725 	cputime_t utime, stime, cutime, cstime;
726 	cputime_t gtime;
727 	cputime_t cgtime;
728 	struct prev_cputime prev_cputime;
729 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
730 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
731 	unsigned long inblock, oublock, cinblock, coublock;
732 	unsigned long maxrss, cmaxrss;
733 	struct task_io_accounting ioac;
734 
735 	/*
736 	 * Cumulative ns of scheduled CPU time for dead threads in the
737 	 * group, not including a zombie group leader. (This only differs
738 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
739 	 * other than jiffies.)
740 	 */
741 	unsigned long long sum_sched_runtime;
742 
743 	/*
744 	 * We don't bother to synchronize most readers of this at all,
745 	 * because there is no reader checking a limit that actually needs
746 	 * to get both rlim_cur and rlim_max atomically, and either one
747 	 * alone is a single word that can safely be read normally.
748 	 * getrlimit/setrlimit use task_lock(current->group_leader) to
749 	 * protect this instead of the siglock, because they really
750 	 * have no need to disable irqs.
751 	 */
752 	struct rlimit rlim[RLIM_NLIMITS];
753 
754 #ifdef CONFIG_BSD_PROCESS_ACCT
755 	struct pacct_struct pacct;	/* per-process accounting information */
756 #endif
757 #ifdef CONFIG_TASKSTATS
758 	struct taskstats *stats;
759 #endif
760 #ifdef CONFIG_AUDIT
761 	unsigned audit_tty;
762 	unsigned audit_tty_log_passwd;
763 	struct tty_audit_buf *tty_audit_buf;
764 #endif
765 
766 	oom_flags_t oom_flags;
767 	short oom_score_adj;		/* OOM kill score adjustment */
768 	short oom_score_adj_min;	/* OOM kill score adjustment min value.
769 					 * Only settable by CAP_SYS_RESOURCE. */
770 
771 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
772 					 * credential calculations
773 					 * (notably ptrace) */
774 };
775 
776 /*
777  * Bits in flags field of signal_struct.
778  */
779 #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
780 #define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
781 #define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
782 #define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
783 /*
784  * Pending notifications to parent.
785  */
786 #define SIGNAL_CLD_STOPPED	0x00000010
787 #define SIGNAL_CLD_CONTINUED	0x00000020
788 #define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
789 
790 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
791 
792 /* If true, all threads except ->group_exit_task have pending SIGKILL */
793 static inline int signal_group_exit(const struct signal_struct *sig)
794 {
795 	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
796 		(sig->group_exit_task != NULL);
797 }
798 
799 /*
800  * Some day this will be a full-fledged user tracking system..
801  */
802 struct user_struct {
803 	atomic_t __count;	/* reference count */
804 	atomic_t processes;	/* How many processes does this user have? */
805 	atomic_t sigpending;	/* How many pending signals does this user have? */
806 #ifdef CONFIG_INOTIFY_USER
807 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
808 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
809 #endif
810 #ifdef CONFIG_FANOTIFY
811 	atomic_t fanotify_listeners;
812 #endif
813 #ifdef CONFIG_EPOLL
814 	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
815 #endif
816 #ifdef CONFIG_POSIX_MQUEUE
817 	/* protected by mq_lock	*/
818 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
819 #endif
820 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
821 
822 #ifdef CONFIG_KEYS
823 	struct key *uid_keyring;	/* UID specific keyring */
824 	struct key *session_keyring;	/* UID's default session keyring */
825 #endif
826 
827 	/* Hash table maintenance information */
828 	struct hlist_node uidhash_node;
829 	kuid_t uid;
830 
831 #ifdef CONFIG_PERF_EVENTS
832 	atomic_long_t locked_vm;
833 #endif
834 };
835 
836 extern int uids_sysfs_init(void);
837 
838 extern struct user_struct *find_user(kuid_t);
839 
840 extern struct user_struct root_user;
841 #define INIT_USER (&root_user)
842 
843 
844 struct backing_dev_info;
845 struct reclaim_state;
846 
847 #ifdef CONFIG_SCHED_INFO
848 struct sched_info {
849 	/* cumulative counters */
850 	unsigned long pcount;	      /* # of times run on this cpu */
851 	unsigned long long run_delay; /* time spent waiting on a runqueue */
852 
853 	/* timestamps */
854 	unsigned long long last_arrival,/* when we last ran on a cpu */
855 			   last_queued;	/* when we were last queued to run */
856 };
857 #endif /* CONFIG_SCHED_INFO */
858 
859 #ifdef CONFIG_TASK_DELAY_ACCT
860 struct task_delay_info {
861 	spinlock_t	lock;
862 	unsigned int	flags;	/* Private per-task flags */
863 
864 	/* For each stat XXX, add the following, aligned appropriately
865 	 *
866 	 * struct timespec XXX_start, XXX_end;
867 	 * u64 XXX_delay;
868 	 * u32 XXX_count;
869 	 *
870 	 * Atomicity of updates to XXX_delay, XXX_count protected by
871 	 * single lock above (split into XXX_lock if contention is an issue).
872 	 */
873 
874 	/*
875 	 * XXX_count is incremented on every XXX operation, the delay
876 	 * associated with the operation is added to XXX_delay.
877 	 * XXX_delay contains the accumulated delay time in nanoseconds.
878 	 */
879 	u64 blkio_start;	/* Shared by blkio, swapin */
880 	u64 blkio_delay;	/* wait for sync block io completion */
881 	u64 swapin_delay;	/* wait for swapin block io completion */
882 	u32 blkio_count;	/* total count of the number of sync block */
883 				/* io operations performed */
884 	u32 swapin_count;	/* total count of the number of swapin block */
885 				/* io operations performed */
886 
887 	u64 freepages_start;
888 	u64 freepages_delay;	/* wait for memory reclaim */
889 	u32 freepages_count;	/* total count of memory reclaim */
890 };
891 #endif	/* CONFIG_TASK_DELAY_ACCT */
892 
893 static inline int sched_info_on(void)
894 {
895 #ifdef CONFIG_SCHEDSTATS
896 	return 1;
897 #elif defined(CONFIG_TASK_DELAY_ACCT)
898 	extern int delayacct_on;
899 	return delayacct_on;
900 #else
901 	return 0;
902 #endif
903 }
904 
905 enum cpu_idle_type {
906 	CPU_IDLE,
907 	CPU_NOT_IDLE,
908 	CPU_NEWLY_IDLE,
909 	CPU_MAX_IDLE_TYPES
910 };
911 
912 /*
913  * Increase resolution of cpu_capacity calculations
914  */
915 #define SCHED_CAPACITY_SHIFT	10
916 #define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)
917 
918 /*
919  * Wake-queues are lists of tasks with a pending wakeup, whose
920  * callers have already marked the task as woken internally,
921  * and can thus carry on. A common use case is being able to
922 	 * do the wakeups once the corresponding user lock has been
923  * released.
924  *
925 	 * We hold a reference to each task in the list across the wakeup,
926  * thus guaranteeing that the memory is still valid by the time
927  * the actual wakeups are performed in wake_up_q().
928  *
929  * One per task suffices, because there's never a need for a task to be
930  * in two wake queues simultaneously; it is forbidden to abandon a task
931  * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
932  * already in a wake queue, the wakeup will happen soon and the second
933  * waker can just skip it.
934  *
935  * The WAKE_Q macro declares and initializes the list head.
936  * wake_up_q() does NOT reinitialize the list; it's expected to be
937  * called near the end of a function, where the fact that the queue is
938  * not used again will be easy to see by inspection.
939  *
940  * Note that this can cause spurious wakeups. schedule() callers
941  * must ensure the call is done inside a loop, confirming that the
942  * wakeup condition has in fact occurred.
943  */
944 struct wake_q_node {
945 	struct wake_q_node *next;
946 };
947 
948 struct wake_q_head {
949 	struct wake_q_node *first;
950 	struct wake_q_node **lastp;
951 };
952 
953 #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
954 
955 #define WAKE_Q(name)					\
956 	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
957 
958 extern void wake_q_add(struct wake_q_head *head,
959 		       struct task_struct *task);
960 extern void wake_up_q(struct wake_q_head *head);
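/*
 * Illustrative sketch of the intended pattern; 'lock' and 'task' are assumed
 * to exist in the surrounding code:
 *
 *	WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&lock);
 *	// decide whom to wake while still holding the lock
 *	wake_q_add(&wake_q, task);
 *	raw_spin_unlock(&lock);
 *
 *	wake_up_q(&wake_q);	// perform the wakeups after the lock is dropped
 */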
961 
962 /*
963  * sched-domains (multiprocessor balancing) declarations:
964  */
965 #ifdef CONFIG_SMP
966 #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
967 #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
968 #define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
969 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
970 #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
971 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
972 #define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu power */
973 #define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
974 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
975 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
976 #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
977 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
978 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
979 #define SD_NUMA			0x4000	/* cross-node balancing */
980 
981 #ifdef CONFIG_SCHED_SMT
982 static inline int cpu_smt_flags(void)
983 {
984 	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
985 }
986 #endif
987 
988 #ifdef CONFIG_SCHED_MC
989 static inline int cpu_core_flags(void)
990 {
991 	return SD_SHARE_PKG_RESOURCES;
992 }
993 #endif
994 
995 #ifdef CONFIG_NUMA
996 static inline int cpu_numa_flags(void)
997 {
998 	return SD_NUMA;
999 }
1000 #endif
1001 
1002 struct sched_domain_attr {
1003 	int relax_domain_level;
1004 };
1005 
1006 #define SD_ATTR_INIT	(struct sched_domain_attr) {	\
1007 	.relax_domain_level = -1,			\
1008 }
1009 
1010 extern int sched_domain_level_max;
1011 
1012 struct sched_group;
1013 
1014 struct sched_domain {
1015 	/* These fields must be setup */
1016 	struct sched_domain *parent;	/* top domain must be null terminated */
1017 	struct sched_domain *child;	/* bottom domain must be null terminated */
1018 	struct sched_group *groups;	/* the balancing groups of the domain */
1019 	unsigned long min_interval;	/* Minimum balance interval ms */
1020 	unsigned long max_interval;	/* Maximum balance interval ms */
1021 	unsigned int busy_factor;	/* less balancing by factor if busy */
1022 	unsigned int imbalance_pct;	/* No balance until over watermark */
1023 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
1024 	unsigned int busy_idx;
1025 	unsigned int idle_idx;
1026 	unsigned int newidle_idx;
1027 	unsigned int wake_idx;
1028 	unsigned int forkexec_idx;
1029 	unsigned int smt_gain;
1030 
1031 	int nohz_idle;			/* NOHZ IDLE status */
1032 	int flags;			/* See SD_* */
1033 	int level;
1034 
1035 	/* Runtime fields. */
1036 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
1037 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
1038 	unsigned int nr_balance_failed; /* initialise to 0 */
1039 
1040 	/* idle_balance() stats */
1041 	u64 max_newidle_lb_cost;
1042 	unsigned long next_decay_max_lb_cost;
1043 
1044 #ifdef CONFIG_SCHEDSTATS
1045 	/* load_balance() stats */
1046 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
1047 	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
1048 	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
1049 	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
1050 	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
1051 	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
1052 	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
1053 	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
1054 
1055 	/* Active load balancing */
1056 	unsigned int alb_count;
1057 	unsigned int alb_failed;
1058 	unsigned int alb_pushed;
1059 
1060 	/* SD_BALANCE_EXEC stats */
1061 	unsigned int sbe_count;
1062 	unsigned int sbe_balanced;
1063 	unsigned int sbe_pushed;
1064 
1065 	/* SD_BALANCE_FORK stats */
1066 	unsigned int sbf_count;
1067 	unsigned int sbf_balanced;
1068 	unsigned int sbf_pushed;
1069 
1070 	/* try_to_wake_up() stats */
1071 	unsigned int ttwu_wake_remote;
1072 	unsigned int ttwu_move_affine;
1073 	unsigned int ttwu_move_balance;
1074 #endif
1075 #ifdef CONFIG_SCHED_DEBUG
1076 	char *name;
1077 #endif
1078 	union {
1079 		void *private;		/* used during construction */
1080 		struct rcu_head rcu;	/* used during destruction */
1081 	};
1082 
1083 	unsigned int span_weight;
1084 	/*
1085 	 * Span of all CPUs in this domain.
1086 	 *
1087 	 * NOTE: this field is variable length. (Allocated dynamically
1088 	 * by attaching extra space to the end of the structure,
1089 	 * depending on how many CPUs the kernel has booted up with)
1090 	 */
1091 	unsigned long span[0];
1092 };
1093 
1094 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1095 {
1096 	return to_cpumask(sd->span);
1097 }
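/*
 * Illustrative sketch: walking the CPUs covered by a domain; 'sd' is assumed
 * to be a valid domain obtained under rcu_read_lock():
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, sched_domain_span(sd))
 *		pr_info("CPU %d is in this domain\n", cpu);
 */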
1098 
1099 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1100 				    struct sched_domain_attr *dattr_new);
1101 
1102 /* Allocate an array of sched domains, for partition_sched_domains(). */
1103 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1104 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1105 
1106 bool cpus_share_cache(int this_cpu, int that_cpu);
1107 
1108 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1109 typedef int (*sched_domain_flags_f)(void);
1110 
1111 #define SDTL_OVERLAP	0x01
1112 
1113 struct sd_data {
1114 	struct sched_domain **__percpu sd;
1115 	struct sched_group **__percpu sg;
1116 	struct sched_group_capacity **__percpu sgc;
1117 };
1118 
1119 struct sched_domain_topology_level {
1120 	sched_domain_mask_f mask;
1121 	sched_domain_flags_f sd_flags;
1122 	int		    flags;
1123 	int		    numa_level;
1124 	struct sd_data      data;
1125 #ifdef CONFIG_SCHED_DEBUG
1126 	char                *name;
1127 #endif
1128 };
1129 
1130 extern struct sched_domain_topology_level *sched_domain_topology;
1131 
1132 extern void set_sched_topology(struct sched_domain_topology_level *tl);
1133 extern void wake_up_if_idle(int cpu);
1134 
1135 #ifdef CONFIG_SCHED_DEBUG
1136 # define SD_INIT_NAME(type)		.name = #type
1137 #else
1138 # define SD_INIT_NAME(type)
1139 #endif
1140 
1141 #else /* CONFIG_SMP */
1142 
1143 struct sched_domain_attr;
1144 
1145 static inline void
1146 partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1147 			struct sched_domain_attr *dattr_new)
1148 {
1149 }
1150 
1151 static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1152 {
1153 	return true;
1154 }
1155 
1156 #endif	/* !CONFIG_SMP */
1157 
1158 
1159 struct io_context;			/* See blkdev.h */
1160 
1161 
1162 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1163 extern void prefetch_stack(struct task_struct *t);
1164 #else
1165 static inline void prefetch_stack(struct task_struct *t) { }
1166 #endif
1167 
1168 struct audit_context;		/* See audit.c */
1169 struct mempolicy;
1170 struct pipe_inode_info;
1171 struct uts_namespace;
1172 
1173 struct load_weight {
1174 	unsigned long weight;
1175 	u32 inv_weight;
1176 };
1177 
1178 /*
1179  * The load_avg/util_avg accumulates an infinite geometric series.
1180  * 1) load_avg factors the amount of time that a sched_entity is
1181  * runnable on a rq into its weight. For cfs_rq, it is the aggregated
1182  * such weights of all runnable and blocked sched_entities.
1183  * 2) util_avg factors frequency scaling into the amount of time
1184  * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
1185  * For cfs_rq, it is the aggregated such times of all runnable and
1186  * blocked sched_entities.
1187  * The 64 bit load_sum can:
1188  * 1) for cfs_rq, afford 4353082796 (=2^64/47742/88761) entities with
1189  * the highest weight (=88761) always runnable without overflowing;
1190  * 2) for an entity, support any load.weight always runnable.
1191  */
1192 struct sched_avg {
1193 	u64 last_update_time, load_sum;
1194 	u32 util_sum, period_contrib;
1195 	unsigned long load_avg, util_avg;
1196 };
1197 
1198 #ifdef CONFIG_SCHEDSTATS
1199 struct sched_statistics {
1200 	u64			wait_start;
1201 	u64			wait_max;
1202 	u64			wait_count;
1203 	u64			wait_sum;
1204 	u64			iowait_count;
1205 	u64			iowait_sum;
1206 
1207 	u64			sleep_start;
1208 	u64			sleep_max;
1209 	s64			sum_sleep_runtime;
1210 
1211 	u64			block_start;
1212 	u64			block_max;
1213 	u64			exec_max;
1214 	u64			slice_max;
1215 
1216 	u64			nr_migrations_cold;
1217 	u64			nr_failed_migrations_affine;
1218 	u64			nr_failed_migrations_running;
1219 	u64			nr_failed_migrations_hot;
1220 	u64			nr_forced_migrations;
1221 
1222 	u64			nr_wakeups;
1223 	u64			nr_wakeups_sync;
1224 	u64			nr_wakeups_migrate;
1225 	u64			nr_wakeups_local;
1226 	u64			nr_wakeups_remote;
1227 	u64			nr_wakeups_affine;
1228 	u64			nr_wakeups_affine_attempts;
1229 	u64			nr_wakeups_passive;
1230 	u64			nr_wakeups_idle;
1231 };
1232 #endif
1233 
1234 struct sched_entity {
1235 	struct load_weight	load;		/* for load-balancing */
1236 	struct rb_node		run_node;
1237 	struct list_head	group_node;
1238 	unsigned int		on_rq;
1239 
1240 	u64			exec_start;
1241 	u64			sum_exec_runtime;
1242 	u64			vruntime;
1243 	u64			prev_sum_exec_runtime;
1244 
1245 	u64			nr_migrations;
1246 
1247 #ifdef CONFIG_SCHEDSTATS
1248 	struct sched_statistics statistics;
1249 #endif
1250 
1251 #ifdef CONFIG_FAIR_GROUP_SCHED
1252 	int			depth;
1253 	struct sched_entity	*parent;
1254 	/* rq on which this entity is (to be) queued: */
1255 	struct cfs_rq		*cfs_rq;
1256 	/* rq "owned" by this entity/group: */
1257 	struct cfs_rq		*my_q;
1258 #endif
1259 
1260 #ifdef CONFIG_SMP
1261 	/* Per entity load average tracking */
1262 	struct sched_avg	avg;
1263 #endif
1264 };
1265 
1266 struct sched_rt_entity {
1267 	struct list_head run_list;
1268 	unsigned long timeout;
1269 	unsigned long watchdog_stamp;
1270 	unsigned int time_slice;
1271 
1272 	struct sched_rt_entity *back;
1273 #ifdef CONFIG_RT_GROUP_SCHED
1274 	struct sched_rt_entity	*parent;
1275 	/* rq on which this entity is (to be) queued: */
1276 	struct rt_rq		*rt_rq;
1277 	/* rq "owned" by this entity/group: */
1278 	struct rt_rq		*my_q;
1279 #endif
1280 };
1281 
1282 struct sched_dl_entity {
1283 	struct rb_node	rb_node;
1284 
1285 	/*
1286 	 * Original scheduling parameters. Copied here from sched_attr
1287 	 * during sched_setattr(), they will remain the same until
1288 	 * the next sched_setattr().
1289 	 */
1290 	u64 dl_runtime;		/* maximum runtime for each instance	*/
1291 	u64 dl_deadline;	/* relative deadline of each instance	*/
1292 	u64 dl_period;		/* separation of two instances (period) */
1293 	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
1294 
1295 	/*
1296 	 * Actual scheduling parameters. Initialized with the values above,
1297 	 * they are continuously updated during task execution. Note that
1298 	 * the remaining runtime could be < 0 in case we are in overrun.
1299 	 */
1300 	s64 runtime;		/* remaining runtime for this instance	*/
1301 	u64 deadline;		/* absolute deadline for this instance	*/
1302 	unsigned int flags;	/* specifying the scheduler behaviour	*/
1303 
1304 	/*
1305 	 * Some bool flags:
1306 	 *
1307 	 * @dl_throttled tells if we exhausted the runtime. If so, the
1308 	 * task has to wait for a replenishment to be performed at the
1309 	 * next firing of dl_timer.
1310 	 *
1311 	 * @dl_new tells if a new instance arrived. If so we must
1312 	 * start executing it with full runtime and reset its absolute
1313 	 * deadline;
1314 	 *
1315 	 * @dl_boosted tells if we are boosted due to PI. If so we are
1316 	 * outside bandwidth enforcement mechanism (but only until we
1317 	 * exit the critical section);
1318 	 *
1319 	 * @dl_yielded tells if task gave up the cpu before consuming
1320 	 * all its available runtime during the last job.
1321 	 */
1322 	int dl_throttled, dl_new, dl_boosted, dl_yielded;
1323 
1324 	/*
1325 	 * Bandwidth enforcement timer. Each -deadline task has its
1326 	 * own bandwidth to be enforced, thus we need one timer per task.
1327 	 */
1328 	struct hrtimer dl_timer;
1329 };
1330 
1331 union rcu_special {
1332 	struct {
1333 		bool blocked;
1334 		bool need_qs;
1335 	} b;
1336 	short s;
1337 };
1338 struct rcu_node;
1339 
1340 enum perf_event_task_context {
1341 	perf_invalid_context = -1,
1342 	perf_hw_context = 0,
1343 	perf_sw_context,
1344 	perf_nr_task_contexts,
1345 };
1346 
1347 /* Track pages that require TLB flushes */
1348 struct tlbflush_unmap_batch {
1349 	/*
1350 	 * Each bit set is a CPU that potentially has a TLB entry for one of
1351 	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
1352 	 */
1353 	struct cpumask cpumask;
1354 
1355 	/* True if any bit in cpumask is set */
1356 	bool flush_required;
1357 
1358 	/*
1359 	 * If true then the PTE was dirty when unmapped. The entry must be
1360 	 * flushed before IO is initiated or a stale TLB entry potentially
1361 	 * allows an update without redirtying the page.
1362 	 */
1363 	bool writable;
1364 };
1365 
1366 struct task_struct {
1367 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1368 	void *stack;
1369 	atomic_t usage;
1370 	unsigned int flags;	/* per process flags, defined below */
1371 	unsigned int ptrace;
1372 
1373 #ifdef CONFIG_SMP
1374 	struct llist_node wake_entry;
1375 	int on_cpu;
1376 	unsigned int wakee_flips;
1377 	unsigned long wakee_flip_decay_ts;
1378 	struct task_struct *last_wakee;
1379 
1380 	int wake_cpu;
1381 #endif
1382 	int on_rq;
1383 
1384 	int prio, static_prio, normal_prio;
1385 	unsigned int rt_priority;
1386 	const struct sched_class *sched_class;
1387 	struct sched_entity se;
1388 	struct sched_rt_entity rt;
1389 #ifdef CONFIG_CGROUP_SCHED
1390 	struct task_group *sched_task_group;
1391 #endif
1392 	struct sched_dl_entity dl;
1393 
1394 #ifdef CONFIG_PREEMPT_NOTIFIERS
1395 	/* list of struct preempt_notifier: */
1396 	struct hlist_head preempt_notifiers;
1397 #endif
1398 
1399 #ifdef CONFIG_BLK_DEV_IO_TRACE
1400 	unsigned int btrace_seq;
1401 #endif
1402 
1403 	unsigned int policy;
1404 	int nr_cpus_allowed;
1405 	cpumask_t cpus_allowed;
1406 
1407 #ifdef CONFIG_PREEMPT_RCU
1408 	int rcu_read_lock_nesting;
1409 	union rcu_special rcu_read_unlock_special;
1410 	struct list_head rcu_node_entry;
1411 	struct rcu_node *rcu_blocked_node;
1412 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1413 #ifdef CONFIG_TASKS_RCU
1414 	unsigned long rcu_tasks_nvcsw;
1415 	bool rcu_tasks_holdout;
1416 	struct list_head rcu_tasks_holdout_list;
1417 	int rcu_tasks_idle_cpu;
1418 #endif /* #ifdef CONFIG_TASKS_RCU */
1419 
1420 #ifdef CONFIG_SCHED_INFO
1421 	struct sched_info sched_info;
1422 #endif
1423 
1424 	struct list_head tasks;
1425 #ifdef CONFIG_SMP
1426 	struct plist_node pushable_tasks;
1427 	struct rb_node pushable_dl_tasks;
1428 #endif
1429 
1430 	struct mm_struct *mm, *active_mm;
1431 	/* per-thread vma caching */
1432 	u32 vmacache_seqnum;
1433 	struct vm_area_struct *vmacache[VMACACHE_SIZE];
1434 #if defined(SPLIT_RSS_COUNTING)
1435 	struct task_rss_stat	rss_stat;
1436 #endif
1437 /* task state */
1438 	int exit_state;
1439 	int exit_code, exit_signal;
1440 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1441 	unsigned long jobctl;	/* JOBCTL_*, siglock protected */
1442 
1443 	/* Used for emulating ABI behavior of previous Linux versions */
1444 	unsigned int personality;
1445 
1446 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1447 				 * execve */
1448 	unsigned in_iowait:1;
1449 
1450 	/* Revert to default priority/policy when forking */
1451 	unsigned sched_reset_on_fork:1;
1452 	unsigned sched_contributes_to_load:1;
1453 	unsigned sched_migrated:1;
1454 
1455 #ifdef CONFIG_MEMCG_KMEM
1456 	unsigned memcg_kmem_skip_account:1;
1457 #endif
1458 #ifdef CONFIG_COMPAT_BRK
1459 	unsigned brk_randomized:1;
1460 #endif
1461 
1462 	unsigned long atomic_flags; /* Flags needing atomic access. */
1463 
1464 	struct restart_block restart_block;
1465 
1466 	pid_t pid;
1467 	pid_t tgid;
1468 
1469 #ifdef CONFIG_CC_STACKPROTECTOR
1470 	/* Canary value for the -fstack-protector gcc feature */
1471 	unsigned long stack_canary;
1472 #endif
1473 	/*
1474 	 * pointers to (original) parent process, youngest child, younger sibling,
1475 	 * older sibling, respectively.  (p->father can be replaced with
1476 	 * p->real_parent->pid)
1477 	 */
1478 	struct task_struct __rcu *real_parent; /* real parent process */
1479 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1480 	/*
1481 	 * children/sibling forms the list of my natural children
1482 	 */
1483 	struct list_head children;	/* list of my children */
1484 	struct list_head sibling;	/* linkage in my parent's children list */
1485 	struct task_struct *group_leader;	/* threadgroup leader */
1486 
1487 	/*
1488 	 * ptraced is the list of tasks this task is using ptrace on.
1489 	 * This includes both natural children and PTRACE_ATTACH targets.
1490 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1491 	 */
1492 	struct list_head ptraced;
1493 	struct list_head ptrace_entry;
1494 
1495 	/* PID/PID hash table linkage. */
1496 	struct pid_link pids[PIDTYPE_MAX];
1497 	struct list_head thread_group;
1498 	struct list_head thread_node;
1499 
1500 	struct completion *vfork_done;		/* for vfork() */
1501 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
1502 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
1503 
1504 	cputime_t utime, stime, utimescaled, stimescaled;
1505 	cputime_t gtime;
1506 	struct prev_cputime prev_cputime;
1507 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1508 	seqlock_t vtime_seqlock;
1509 	unsigned long long vtime_snap;
1510 	enum {
1511 		VTIME_SLEEPING = 0,
1512 		VTIME_USER,
1513 		VTIME_SYS,
1514 	} vtime_snap_whence;
1515 #endif
1516 	unsigned long nvcsw, nivcsw; /* context switch counts */
1517 	u64 start_time;		/* monotonic time in nsec */
1518 	u64 real_start_time;	/* boot based time in nsec */
1519 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1520 	unsigned long min_flt, maj_flt;
1521 
1522 	struct task_cputime cputime_expires;
1523 	struct list_head cpu_timers[3];
1524 
1525 /* process credentials */
1526 	const struct cred __rcu *real_cred; /* objective and real subjective task
1527 					 * credentials (COW) */
1528 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
1529 					 * credentials (COW) */
1530 	char comm[TASK_COMM_LEN]; /* executable name excluding path
1531 				     - access with [gs]et_task_comm (which lock
1532 				       it with task_lock())
1533 				     - initialized normally by setup_new_exec */
1534 /* file system info */
1535 	struct nameidata *nameidata;
1536 #ifdef CONFIG_SYSVIPC
1537 /* ipc stuff */
1538 	struct sysv_sem sysvsem;
1539 	struct sysv_shm sysvshm;
1540 #endif
1541 #ifdef CONFIG_DETECT_HUNG_TASK
1542 /* hung task detection */
1543 	unsigned long last_switch_count;
1544 #endif
1545 /* filesystem information */
1546 	struct fs_struct *fs;
1547 /* open file information */
1548 	struct files_struct *files;
1549 /* namespaces */
1550 	struct nsproxy *nsproxy;
1551 /* signal handlers */
1552 	struct signal_struct *signal;
1553 	struct sighand_struct *sighand;
1554 
1555 	sigset_t blocked, real_blocked;
1556 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
1557 	struct sigpending pending;
1558 
1559 	unsigned long sas_ss_sp;
1560 	size_t sas_ss_size;
1561 	int (*notifier)(void *priv);
1562 	void *notifier_data;
1563 	sigset_t *notifier_mask;
1564 	struct callback_head *task_works;
1565 
1566 	struct audit_context *audit_context;
1567 #ifdef CONFIG_AUDITSYSCALL
1568 	kuid_t loginuid;
1569 	unsigned int sessionid;
1570 #endif
1571 	struct seccomp seccomp;
1572 
1573 /* Thread group tracking */
1574    	u32 parent_exec_id;
1575    	u32 self_exec_id;
1576 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1577  * mempolicy */
1578 	spinlock_t alloc_lock;
1579 
1580 	/* Protection of the PI data structures: */
1581 	raw_spinlock_t pi_lock;
1582 
1583 	struct wake_q_node wake_q;
1584 
1585 #ifdef CONFIG_RT_MUTEXES
1586 	/* PI waiters blocked on a rt_mutex held by this task */
1587 	struct rb_root pi_waiters;
1588 	struct rb_node *pi_waiters_leftmost;
1589 	/* Deadlock detection and priority inheritance handling */
1590 	struct rt_mutex_waiter *pi_blocked_on;
1591 #endif
1592 
1593 #ifdef CONFIG_DEBUG_MUTEXES
1594 	/* mutex deadlock detection */
1595 	struct mutex_waiter *blocked_on;
1596 #endif
1597 #ifdef CONFIG_TRACE_IRQFLAGS
1598 	unsigned int irq_events;
1599 	unsigned long hardirq_enable_ip;
1600 	unsigned long hardirq_disable_ip;
1601 	unsigned int hardirq_enable_event;
1602 	unsigned int hardirq_disable_event;
1603 	int hardirqs_enabled;
1604 	int hardirq_context;
1605 	unsigned long softirq_disable_ip;
1606 	unsigned long softirq_enable_ip;
1607 	unsigned int softirq_disable_event;
1608 	unsigned int softirq_enable_event;
1609 	int softirqs_enabled;
1610 	int softirq_context;
1611 #endif
1612 #ifdef CONFIG_LOCKDEP
1613 # define MAX_LOCK_DEPTH 48UL
1614 	u64 curr_chain_key;
1615 	int lockdep_depth;
1616 	unsigned int lockdep_recursion;
1617 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1618 	gfp_t lockdep_reclaim_gfp;
1619 #endif
1620 
1621 /* journalling filesystem info */
1622 	void *journal_info;
1623 
1624 /* stacked block device info */
1625 	struct bio_list *bio_list;
1626 
1627 #ifdef CONFIG_BLOCK
1628 /* stack plugging */
1629 	struct blk_plug *plug;
1630 #endif
1631 
1632 /* VM state */
1633 	struct reclaim_state *reclaim_state;
1634 
1635 	struct backing_dev_info *backing_dev_info;
1636 
1637 	struct io_context *io_context;
1638 
1639 	unsigned long ptrace_message;
1640 	siginfo_t *last_siginfo; /* For ptrace use.  */
1641 	struct task_io_accounting ioac;
1642 #if defined(CONFIG_TASK_XACCT)
1643 	u64 acct_rss_mem1;	/* accumulated rss usage */
1644 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1645 	cputime_t acct_timexpd;	/* stime + utime since last update */
1646 #endif
1647 #ifdef CONFIG_CPUSETS
1648 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1649 	seqcount_t mems_allowed_seq;	/* Sequence no. to catch updates */
1650 	int cpuset_mem_spread_rotor;
1651 	int cpuset_slab_spread_rotor;
1652 #endif
1653 #ifdef CONFIG_CGROUPS
1654 	/* Control Group info protected by css_set_lock */
1655 	struct css_set __rcu *cgroups;
1656 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1657 	struct list_head cg_list;
1658 #endif
1659 #ifdef CONFIG_FUTEX
1660 	struct robust_list_head __user *robust_list;
1661 #ifdef CONFIG_COMPAT
1662 	struct compat_robust_list_head __user *compat_robust_list;
1663 #endif
1664 	struct list_head pi_state_list;
1665 	struct futex_pi_state *pi_state_cache;
1666 #endif
1667 #ifdef CONFIG_PERF_EVENTS
1668 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1669 	struct mutex perf_event_mutex;
1670 	struct list_head perf_event_list;
1671 #endif
1672 #ifdef CONFIG_DEBUG_PREEMPT
1673 	unsigned long preempt_disable_ip;
1674 #endif
1675 #ifdef CONFIG_NUMA
1676 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1677 	short il_next;
1678 	short pref_node_fork;
1679 #endif
1680 #ifdef CONFIG_NUMA_BALANCING
1681 	int numa_scan_seq;
1682 	unsigned int numa_scan_period;
1683 	unsigned int numa_scan_period_max;
1684 	int numa_preferred_nid;
1685 	unsigned long numa_migrate_retry;
1686 	u64 node_stamp;			/* migration stamp  */
1687 	u64 last_task_numa_placement;
1688 	u64 last_sum_exec_runtime;
1689 	struct callback_head numa_work;
1690 
1691 	struct list_head numa_entry;
1692 	struct numa_group *numa_group;
1693 
1694 	/*
1695 	 * numa_faults is an array split into four regions:
1696 	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1697 	 * in this precise order.
1698 	 *
1699 	 * faults_memory: Exponential decaying average of faults on a per-node
1700 	 * basis. Scheduling placement decisions are made based on these
1701 	 * counts. The values remain static for the duration of a PTE scan.
1702 	 * faults_cpu: Track the nodes the process was running on when a NUMA
1703 	 * hinting fault was incurred.
1704 	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1705 	 * during the current scan window. When the scan completes, the counts
1706 	 * in faults_memory and faults_cpu decay and these values are copied.
1707 	 */
1708 	unsigned long *numa_faults;
1709 	unsigned long total_numa_faults;
1710 
1711 	/*
	 * numa_faults_locality tracks whether faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults.
1716 	 */
1717 	unsigned long numa_faults_locality[3];
1718 
1719 	unsigned long numa_pages_migrated;
1720 #endif /* CONFIG_NUMA_BALANCING */
1721 
1722 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1723 	struct tlbflush_unmap_batch tlb_ubc;
1724 #endif
1725 
1726 	struct rcu_head rcu;
1727 
1728 	/*
1729 	 * cache last used pipe for splice
1730 	 */
1731 	struct pipe_inode_info *splice_pipe;
1732 
1733 	struct page_frag task_frag;
1734 
1735 #ifdef	CONFIG_TASK_DELAY_ACCT
1736 	struct task_delay_info *delays;
1737 #endif
1738 #ifdef CONFIG_FAULT_INJECTION
1739 	int make_it_fail;
1740 #endif
1741 	/*
1742 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1743 	 * balance_dirty_pages() for some dirty throttling pause
1744 	 */
1745 	int nr_dirtied;
1746 	int nr_dirtied_pause;
1747 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
1748 
1749 #ifdef CONFIG_LATENCYTOP
1750 	int latency_record_count;
1751 	struct latency_record latency_record[LT_SAVECOUNT];
1752 #endif
1753 	/*
1754 	 * time slack values; these are used to round up poll() and
	 * select() etc. timeout values. These are in nanoseconds.
1756 	 */
1757 	unsigned long timer_slack_ns;
1758 	unsigned long default_timer_slack_ns;
1759 
1760 #ifdef CONFIG_KASAN
1761 	unsigned int kasan_depth;
1762 #endif
1763 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1764 	/* Index of current stored address in ret_stack */
1765 	int curr_ret_stack;
1766 	/* Stack of return addresses for return function tracing */
1767 	struct ftrace_ret_stack	*ret_stack;
1768 	/* time stamp for last schedule */
1769 	unsigned long long ftrace_timestamp;
1770 	/*
1771 	 * Number of functions that haven't been traced
1772 	 * because of depth overrun.
1773 	 */
1774 	atomic_t trace_overrun;
1775 	/* Pause for the tracing */
1776 	atomic_t tracing_graph_pause;
1777 #endif
1778 #ifdef CONFIG_TRACING
1779 	/* state flags for use by tracers */
1780 	unsigned long trace;
1781 	/* bitmask and counter of trace recursion */
1782 	unsigned long trace_recursion;
1783 #endif /* CONFIG_TRACING */
1784 #ifdef CONFIG_MEMCG
1785 	struct memcg_oom_info {
1786 		struct mem_cgroup *memcg;
1787 		gfp_t gfp_mask;
1788 		int order;
1789 		unsigned int may_oom:1;
1790 	} memcg_oom;
1791 #endif
1792 #ifdef CONFIG_UPROBES
1793 	struct uprobe_task *utask;
1794 #endif
1795 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1796 	unsigned int	sequential_io;
1797 	unsigned int	sequential_io_avg;
1798 #endif
1799 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1800 	unsigned long	task_state_change;
1801 #endif
1802 	int pagefault_disabled;
1803 /* CPU-specific state of this task */
1804 	struct thread_struct thread;
1805 /*
1806  * WARNING: on x86, 'thread_struct' contains a variable-sized
1807  * structure.  It *MUST* be at the end of 'task_struct'.
1808  *
1809  * Do not put anything below here!
1810  */
1811 };
1812 
1813 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1814 extern int arch_task_struct_size __read_mostly;
1815 #else
1816 # define arch_task_struct_size (sizeof(struct task_struct))
1817 #endif
1818 
1819 /* Future-safe accessor for struct task_struct's cpus_allowed. */
1820 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1821 
1822 #define TNF_MIGRATED	0x01
1823 #define TNF_NO_GROUP	0x02
1824 #define TNF_SHARED	0x04
1825 #define TNF_FAULT_LOCAL	0x08
1826 #define TNF_MIGRATE_FAIL 0x10
1827 
1828 #ifdef CONFIG_NUMA_BALANCING
1829 extern void task_numa_fault(int last_node, int node, int pages, int flags);
1830 extern pid_t task_numa_group_id(struct task_struct *p);
1831 extern void set_numabalancing_state(bool enabled);
1832 extern void task_numa_free(struct task_struct *p);
1833 extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1834 					int src_nid, int dst_cpu);
1835 #else
1836 static inline void task_numa_fault(int last_node, int node, int pages,
1837 				   int flags)
1838 {
1839 }
1840 static inline pid_t task_numa_group_id(struct task_struct *p)
1841 {
1842 	return 0;
1843 }
1844 static inline void set_numabalancing_state(bool enabled)
1845 {
1846 }
1847 static inline void task_numa_free(struct task_struct *p)
1848 {
1849 }
1850 static inline bool should_numa_migrate_memory(struct task_struct *p,
1851 				struct page *page, int src_nid, int dst_cpu)
1852 {
1853 	return true;
1854 }
1855 #endif
1856 
1857 static inline struct pid *task_pid(struct task_struct *task)
1858 {
1859 	return task->pids[PIDTYPE_PID].pid;
1860 }
1861 
1862 static inline struct pid *task_tgid(struct task_struct *task)
1863 {
1864 	return task->group_leader->pids[PIDTYPE_PID].pid;
1865 }
1866 
1867 /*
1868  * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
1870  * we can race with another thread doing sys_setsid/sys_setpgid.
1871  */
1872 static inline struct pid *task_pgrp(struct task_struct *task)
1873 {
1874 	return task->group_leader->pids[PIDTYPE_PGID].pid;
1875 }
1876 
1877 static inline struct pid *task_session(struct task_struct *task)
1878 {
1879 	return task->group_leader->pids[PIDTYPE_SID].pid;
1880 }
1881 
1882 struct pid_namespace;
1883 
1884 /*
1885  * the helpers to get the task's different pids as they are seen
1886  * from various namespaces
1887  *
1888  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1889  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1890  *                     current.
1891  * task_xid_nr_ns()  : id seen from the ns specified;
1892  *
1893  * set_task_vxid()   : assigns a virtual id to a task;
1894  *
1895  * see also pid_nr() etc in include/linux/pid.h
1896  */
1897 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1898 			struct pid_namespace *ns);
1899 
1900 static inline pid_t task_pid_nr(struct task_struct *tsk)
1901 {
1902 	return tsk->pid;
1903 }
1904 
1905 static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1906 					struct pid_namespace *ns)
1907 {
1908 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1909 }
1910 
1911 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1912 {
1913 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1914 }
1915 
1916 
1917 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1918 {
1919 	return tsk->tgid;
1920 }
1921 
1922 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1923 
1924 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1925 {
1926 	return pid_vnr(task_tgid(tsk));
1927 }
1928 
1929 
1930 static inline int pid_alive(const struct task_struct *p);
1931 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1932 {
1933 	pid_t pid = 0;
1934 
1935 	rcu_read_lock();
1936 	if (pid_alive(tsk))
1937 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1938 	rcu_read_unlock();
1939 
1940 	return pid;
1941 }
1942 
1943 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1944 {
1945 	return task_ppid_nr_ns(tsk, &init_pid_ns);
1946 }
1947 
1948 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1949 					struct pid_namespace *ns)
1950 {
1951 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1952 }
1953 
1954 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1955 {
1956 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1957 }
1958 
1959 
1960 static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1961 					struct pid_namespace *ns)
1962 {
1963 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1964 }
1965 
1966 static inline pid_t task_session_vnr(struct task_struct *tsk)
1967 {
1968 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1969 }
1970 
1971 /* obsolete, do not use */
1972 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1973 {
1974 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1975 }
1976 
1977 /**
1978  * pid_alive - check that a task structure is not stale
1979  * @p: Task structure to be checked.
1980  *
 * Test whether a process is not yet dead (at most in zombie state).
1982  * If pid_alive fails, then pointers within the task structure
1983  * can be stale and must not be dereferenced.
1984  *
1985  * Return: 1 if the process is alive. 0 otherwise.
1986  */
1987 static inline int pid_alive(const struct task_struct *p)
1988 {
1989 	return p->pids[PIDTYPE_PID].pid != NULL;
1990 }
1991 
1992 /**
1993  * is_global_init - check if a task structure is init
1994  * @tsk: Task structure to be checked.
1995  *
1996  * Check if a task structure is the first user space task the kernel created.
1997  *
1998  * Return: 1 if the task structure is init. 0 otherwise.
1999  */
2000 static inline int is_global_init(struct task_struct *tsk)
2001 {
2002 	return tsk->pid == 1;
2003 }
2004 
2005 extern struct pid *cad_pid;
2006 
2007 extern void free_task(struct task_struct *tsk);
2008 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
2009 
2010 extern void __put_task_struct(struct task_struct *t);
2011 
2012 static inline void put_task_struct(struct task_struct *t)
2013 {
2014 	if (atomic_dec_and_test(&t->usage))
2015 		__put_task_struct(t);
2016 }
2017 
2018 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2019 extern void task_cputime(struct task_struct *t,
2020 			 cputime_t *utime, cputime_t *stime);
2021 extern void task_cputime_scaled(struct task_struct *t,
2022 				cputime_t *utimescaled, cputime_t *stimescaled);
2023 extern cputime_t task_gtime(struct task_struct *t);
2024 #else
2025 static inline void task_cputime(struct task_struct *t,
2026 				cputime_t *utime, cputime_t *stime)
2027 {
2028 	if (utime)
2029 		*utime = t->utime;
2030 	if (stime)
2031 		*stime = t->stime;
2032 }
2033 
2034 static inline void task_cputime_scaled(struct task_struct *t,
2035 				       cputime_t *utimescaled,
2036 				       cputime_t *stimescaled)
2037 {
2038 	if (utimescaled)
2039 		*utimescaled = t->utimescaled;
2040 	if (stimescaled)
2041 		*stimescaled = t->stimescaled;
2042 }
2043 
2044 static inline cputime_t task_gtime(struct task_struct *t)
2045 {
2046 	return t->gtime;
2047 }
2048 #endif
2049 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2050 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
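
/*
 * A hedged example of combining the accessors above; @t is a caller-held
 * task pointer and the local variables are the caller's own:
 *
 *	cputime_t ut, st;
 *
 *	task_cputime(t, &ut, &st);		// raw utime/stime
 *	task_cputime_adjusted(t, &ut, &st);	// scaled against actual runtime
 */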
2051 
2052 /*
2053  * Per process flags
2054  */
2055 #define PF_EXITING	0x00000004	/* getting shut down */
2056 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
2057 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
2058 #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
2059 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
2060 #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
2061 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
2062 #define PF_DUMPCORE	0x00000200	/* dumped core */
2063 #define PF_SIGNALED	0x00000400	/* killed by a signal */
2064 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
2065 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
2066 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
2067 #define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
2068 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
2069 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
2070 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
2071 #define PF_KSWAPD	0x00040000	/* I am kswapd */
2072 #define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
2073 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
2074 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
2075 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
2076 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
2077 #define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
2078 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
2079 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
2080 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
2081 #define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
2082 
2083 /*
2084  * Only the _current_ task can read/write to tsk->flags, but other
2085  * tasks can access tsk->flags in readonly mode for example
2086  * with tsk_used_math (like during threaded core dumping).
2087  * There is however an exception to this rule during ptrace
2088  * or during fork: the ptracer task is allowed to write to the
2089  * child->flags of its traced child (same goes for fork, the parent
2090  * can write to the child->flags), because we're guaranteed the
2091  * child is not running and in turn not changing child->flags
2092  * at the same time the parent does it.
2093  */
2094 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
2095 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
2096 #define clear_used_math() clear_stopped_child_used_math(current)
2097 #define set_used_math() set_stopped_child_used_math(current)
2098 #define conditional_stopped_child_used_math(condition, child) \
2099 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
2100 #define conditional_used_math(condition) \
2101 	conditional_stopped_child_used_math(condition, current)
2102 #define copy_to_stopped_child_used_math(child) \
2103 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
2104 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
2105 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
2106 #define used_math() tsk_used_math(current)
2107 
2108 /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
2109  * __GFP_FS is also cleared as it implies __GFP_IO.
2110  */
2111 static inline gfp_t memalloc_noio_flags(gfp_t flags)
2112 {
2113 	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
2114 		flags &= ~(__GFP_IO | __GFP_FS);
2115 	return flags;
2116 }
2117 
2118 static inline unsigned int memalloc_noio_save(void)
2119 {
2120 	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
2121 	current->flags |= PF_MEMALLOC_NOIO;
2122 	return flags;
2123 }
2124 
2125 static inline void memalloc_noio_restore(unsigned int flags)
2126 {
2127 	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
2128 }
2129 
2130 /* Per-process atomic flags. */
2131 #define PFA_NO_NEW_PRIVS 0	/* May not gain new privileges. */
2132 #define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
2133 #define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
2134 
2135 
2136 #define TASK_PFA_TEST(name, func)					\
2137 	static inline bool task_##func(struct task_struct *p)		\
2138 	{ return test_bit(PFA_##name, &p->atomic_flags); }
2139 #define TASK_PFA_SET(name, func)					\
2140 	static inline void task_set_##func(struct task_struct *p)	\
2141 	{ set_bit(PFA_##name, &p->atomic_flags); }
2142 #define TASK_PFA_CLEAR(name, func)					\
2143 	static inline void task_clear_##func(struct task_struct *p)	\
2144 	{ clear_bit(PFA_##name, &p->atomic_flags); }
2145 
2146 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
2147 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
2148 
2149 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
2150 TASK_PFA_SET(SPREAD_PAGE, spread_page)
2151 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
2152 
2153 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2154 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2155 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
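
/*
 * For reference, the instantiations above expand to inline helpers named
 * task_<func>(), task_set_<func>() and task_clear_<func>(), e.g. (call
 * sites are hypothetical):
 *
 *	if (task_no_new_privs(current))		// test PFA_NO_NEW_PRIVS
 *		...
 *	task_set_spread_page(p);		// set PFA_SPREAD_PAGE
 *	task_clear_spread_slab(p);		// clear PFA_SPREAD_SLAB
 */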
2156 
2157 /*
2158  * task->jobctl flags
2159  */
2160 #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
2161 
2162 #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
2163 #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
2164 #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
2165 #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
2166 #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
2167 #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
2168 #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
2169 
2170 #define JOBCTL_STOP_DEQUEUED	(1UL << JOBCTL_STOP_DEQUEUED_BIT)
2171 #define JOBCTL_STOP_PENDING	(1UL << JOBCTL_STOP_PENDING_BIT)
2172 #define JOBCTL_STOP_CONSUME	(1UL << JOBCTL_STOP_CONSUME_BIT)
2173 #define JOBCTL_TRAP_STOP	(1UL << JOBCTL_TRAP_STOP_BIT)
2174 #define JOBCTL_TRAP_NOTIFY	(1UL << JOBCTL_TRAP_NOTIFY_BIT)
2175 #define JOBCTL_TRAPPING		(1UL << JOBCTL_TRAPPING_BIT)
2176 #define JOBCTL_LISTENING	(1UL << JOBCTL_LISTENING_BIT)
2177 
2178 #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
2179 #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
2180 
2181 extern bool task_set_jobctl_pending(struct task_struct *task,
2182 				    unsigned long mask);
2183 extern void task_clear_jobctl_trapping(struct task_struct *task);
2184 extern void task_clear_jobctl_pending(struct task_struct *task,
2185 				      unsigned long mask);
2186 
2187 static inline void rcu_copy_process(struct task_struct *p)
2188 {
2189 #ifdef CONFIG_PREEMPT_RCU
2190 	p->rcu_read_lock_nesting = 0;
2191 	p->rcu_read_unlock_special.s = 0;
2192 	p->rcu_blocked_node = NULL;
2193 	INIT_LIST_HEAD(&p->rcu_node_entry);
2194 #endif /* #ifdef CONFIG_PREEMPT_RCU */
2195 #ifdef CONFIG_TASKS_RCU
2196 	p->rcu_tasks_holdout = false;
2197 	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2198 	p->rcu_tasks_idle_cpu = -1;
2199 #endif /* #ifdef CONFIG_TASKS_RCU */
2200 }
2201 
2202 static inline void tsk_restore_flags(struct task_struct *task,
2203 				unsigned long orig_flags, unsigned long flags)
2204 {
2205 	task->flags &= ~flags;
2206 	task->flags |= orig_flags & flags;
2207 }
2208 
2209 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2210 				     const struct cpumask *trial);
2211 extern int task_can_attach(struct task_struct *p,
2212 			   const struct cpumask *cs_cpus_allowed);
2213 #ifdef CONFIG_SMP
2214 extern void do_set_cpus_allowed(struct task_struct *p,
2215 			       const struct cpumask *new_mask);
2216 
2217 extern int set_cpus_allowed_ptr(struct task_struct *p,
2218 				const struct cpumask *new_mask);
2219 #else
2220 static inline void do_set_cpus_allowed(struct task_struct *p,
2221 				      const struct cpumask *new_mask)
2222 {
2223 }
2224 static inline int set_cpus_allowed_ptr(struct task_struct *p,
2225 				       const struct cpumask *new_mask)
2226 {
2227 	if (!cpumask_test_cpu(0, new_mask))
2228 		return -EINVAL;
2229 	return 0;
2230 }
2231 #endif
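
/*
 * A hedged sketch of typical use (@p and @cpu are caller-provided): pin a
 * task to one CPU, then allow it to run anywhere again; callers must be
 * prepared for an error return.
 *
 *	ret = set_cpus_allowed_ptr(p, cpumask_of(cpu));
 *	...
 *	ret = set_cpus_allowed_ptr(p, cpu_possible_mask);
 */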
2232 
2233 #ifdef CONFIG_NO_HZ_COMMON
2234 void calc_load_enter_idle(void);
2235 void calc_load_exit_idle(void);
2236 #else
2237 static inline void calc_load_enter_idle(void) { }
2238 static inline void calc_load_exit_idle(void) { }
2239 #endif /* CONFIG_NO_HZ_COMMON */
2240 
2241 /*
2242  * Do not use outside of architecture code which knows its limitations.
2243  *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, and using it directly (which you should not) requires disabling IRQs.
 *
 * Please use one of the interfaces below.
2248  */
2249 extern unsigned long long notrace sched_clock(void);
2250 /*
2251  * See the comment in kernel/sched/clock.c
2252  */
2253 extern u64 cpu_clock(int cpu);
2254 extern u64 local_clock(void);
2255 extern u64 running_clock(void);
2256 extern u64 sched_clock_cpu(int cpu);
2257 
2258 
2259 extern void sched_clock_init(void);
2260 
2261 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2262 static inline void sched_clock_tick(void)
2263 {
2264 }
2265 
2266 static inline void sched_clock_idle_sleep_event(void)
2267 {
2268 }
2269 
2270 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2271 {
2272 }
2273 #else
2274 /*
 * Architectures that have specified CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 * in their arch Kconfig can mark the clock stable if, during bootup,
 * it turns out that sched_clock() is reliable after all:
2279  */
2280 extern int sched_clock_stable(void);
2281 extern void set_sched_clock_stable(void);
2282 extern void clear_sched_clock_stable(void);
2283 
2284 extern void sched_clock_tick(void);
2285 extern void sched_clock_idle_sleep_event(void);
2286 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2287 #endif
2288 
2289 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2290 /*
 * An interface for runtime opt-in to IRQ time accounting based on sched_clock.
 * The explicit opt-in avoids a performance penalty on systems with slow
 * sched_clock implementations.
2294  */
2295 extern void enable_sched_clock_irqtime(void);
2296 extern void disable_sched_clock_irqtime(void);
2297 #else
2298 static inline void enable_sched_clock_irqtime(void) {}
2299 static inline void disable_sched_clock_irqtime(void) {}
2300 #endif
2301 
2302 extern unsigned long long
2303 task_sched_runtime(struct task_struct *task);
2304 
2305 /* sched_exec is called by processes performing an exec */
2306 #ifdef CONFIG_SMP
2307 extern void sched_exec(void);
2308 #else
2309 #define sched_exec()   {}
2310 #endif
2311 
2312 extern void sched_clock_idle_sleep_event(void);
2313 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2314 
2315 #ifdef CONFIG_HOTPLUG_CPU
2316 extern void idle_task_exit(void);
2317 #else
2318 static inline void idle_task_exit(void) {}
2319 #endif
2320 
2321 #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2322 extern void wake_up_nohz_cpu(int cpu);
2323 #else
2324 static inline void wake_up_nohz_cpu(int cpu) { }
2325 #endif
2326 
2327 #ifdef CONFIG_NO_HZ_FULL
2328 extern bool sched_can_stop_tick(void);
2329 extern u64 scheduler_tick_max_deferment(void);
2330 #else
2331 static inline bool sched_can_stop_tick(void) { return false; }
2332 #endif
2333 
2334 #ifdef CONFIG_SCHED_AUTOGROUP
2335 extern void sched_autogroup_create_attach(struct task_struct *p);
2336 extern void sched_autogroup_detach(struct task_struct *p);
2337 extern void sched_autogroup_fork(struct signal_struct *sig);
2338 extern void sched_autogroup_exit(struct signal_struct *sig);
2339 #ifdef CONFIG_PROC_FS
2340 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2341 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2342 #endif
2343 #else
2344 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2345 static inline void sched_autogroup_detach(struct task_struct *p) { }
2346 static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2347 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2348 #endif
2349 
2350 extern int yield_to(struct task_struct *p, bool preempt);
2351 extern void set_user_nice(struct task_struct *p, long nice);
2352 extern int task_prio(const struct task_struct *p);
2353 /**
2354  * task_nice - return the nice value of a given task.
2355  * @p: the task in question.
2356  *
2357  * Return: The nice value [ -20 ... 0 ... 19 ].
2358  */
2359 static inline int task_nice(const struct task_struct *p)
2360 {
2361 	return PRIO_TO_NICE((p)->static_prio);
2362 }
2363 extern int can_nice(const struct task_struct *p, const int nice);
2364 extern int task_curr(const struct task_struct *p);
2365 extern int idle_cpu(int cpu);
2366 extern int sched_setscheduler(struct task_struct *, int,
2367 			      const struct sched_param *);
2368 extern int sched_setscheduler_nocheck(struct task_struct *, int,
2369 				      const struct sched_param *);
2370 extern int sched_setattr(struct task_struct *,
2371 			 const struct sched_attr *);
2372 extern struct task_struct *idle_task(int cpu);
2373 /**
2374  * is_idle_task - is the specified task an idle task?
2375  * @p: the task in question.
2376  *
2377  * Return: 1 if @p is an idle task. 0 otherwise.
2378  */
2379 static inline bool is_idle_task(const struct task_struct *p)
2380 {
2381 	return p->pid == 0;
2382 }
2383 extern struct task_struct *curr_task(int cpu);
2384 extern void set_curr_task(int cpu, struct task_struct *p);
2385 
2386 void yield(void);
2387 
2388 union thread_union {
2389 	struct thread_info thread_info;
2390 	unsigned long stack[THREAD_SIZE/sizeof(long)];
2391 };
2392 
2393 #ifndef __HAVE_ARCH_KSTACK_END
2394 static inline int kstack_end(void *addr)
2395 {
2396 	/* Reliable end of stack detection:
	 * Some APM BIOS versions misalign the stack.
2398 	 */
2399 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2400 }
2401 #endif
2402 
2403 extern union thread_union init_thread_union;
2404 extern struct task_struct init_task;
2405 
2406 extern struct   mm_struct init_mm;
2407 
2408 extern struct pid_namespace init_pid_ns;
2409 
2410 /*
2411  * find a task by one of its numerical ids
2412  *
2413  * find_task_by_pid_ns():
2414  *      finds a task by its pid in the specified namespace
2415  * find_task_by_vpid():
2416  *      finds a task by its virtual pid
2417  *
2418  * see also find_vpid() etc in include/linux/pid.h
2419  */
2420 
2421 extern struct task_struct *find_task_by_vpid(pid_t nr);
2422 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2423 		struct pid_namespace *ns);
2424 
2425 /* per-UID process charging. */
2426 extern struct user_struct * alloc_uid(kuid_t);
2427 static inline struct user_struct *get_uid(struct user_struct *u)
2428 {
2429 	atomic_inc(&u->__count);
2430 	return u;
2431 }
2432 extern void free_uid(struct user_struct *);
2433 
2434 #include <asm/current.h>
2435 
2436 extern void xtime_update(unsigned long ticks);
2437 
2438 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2439 extern int wake_up_process(struct task_struct *tsk);
2440 extern void wake_up_new_task(struct task_struct *tsk);
2441 #ifdef CONFIG_SMP
2442  extern void kick_process(struct task_struct *tsk);
2443 #else
2444  static inline void kick_process(struct task_struct *tsk) { }
2445 #endif
2446 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2447 extern void sched_dead(struct task_struct *p);
2448 
2449 extern void proc_caches_init(void);
2450 extern void flush_signals(struct task_struct *);
2451 extern void ignore_signals(struct task_struct *);
2452 extern void flush_signal_handlers(struct task_struct *, int force_default);
2453 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2454 
2455 static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2456 {
2457 	unsigned long flags;
2458 	int ret;
2459 
2460 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
2461 	ret = dequeue_signal(tsk, mask, info);
2462 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2463 
2464 	return ret;
2465 }
2466 
2467 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2468 			      sigset_t *mask);
2469 extern void unblock_all_signals(void);
2470 extern void release_task(struct task_struct * p);
2471 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2472 extern int force_sigsegv(int, struct task_struct *);
2473 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2474 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2475 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2476 extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2477 				const struct cred *, u32);
2478 extern int kill_pgrp(struct pid *pid, int sig, int priv);
2479 extern int kill_pid(struct pid *pid, int sig, int priv);
2480 extern int kill_proc_info(int, struct siginfo *, pid_t);
2481 extern __must_check bool do_notify_parent(struct task_struct *, int);
2482 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2483 extern void force_sig(int, struct task_struct *);
2484 extern int send_sig(int, struct task_struct *, int);
2485 extern int zap_other_threads(struct task_struct *p);
2486 extern struct sigqueue *sigqueue_alloc(void);
2487 extern void sigqueue_free(struct sigqueue *);
2488 extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2489 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2490 
2491 static inline void restore_saved_sigmask(void)
2492 {
2493 	if (test_and_clear_restore_sigmask())
2494 		__set_current_blocked(&current->saved_sigmask);
2495 }
2496 
2497 static inline sigset_t *sigmask_to_save(void)
2498 {
2499 	sigset_t *res = &current->blocked;
2500 	if (unlikely(test_restore_sigmask()))
2501 		res = &current->saved_sigmask;
2502 	return res;
2503 }
2504 
2505 static inline int kill_cad_pid(int sig, int priv)
2506 {
2507 	return kill_pid(cad_pid, sig, priv);
2508 }
2509 
2510 /* These can be the second arg to send_sig_info/send_group_sig_info.  */
2511 #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2512 #define SEND_SIG_PRIV	((struct siginfo *) 1)
2513 #define SEND_SIG_FORCED	((struct siginfo *) 2)
2514 
2515 /*
2516  * True if we are on the alternate signal stack.
2517  */
2518 static inline int on_sig_stack(unsigned long sp)
2519 {
2520 #ifdef CONFIG_STACK_GROWSUP
2521 	return sp >= current->sas_ss_sp &&
2522 		sp - current->sas_ss_sp < current->sas_ss_size;
2523 #else
2524 	return sp > current->sas_ss_sp &&
2525 		sp - current->sas_ss_sp <= current->sas_ss_size;
2526 #endif
2527 }
2528 
2529 static inline int sas_ss_flags(unsigned long sp)
2530 {
2531 	if (!current->sas_ss_size)
2532 		return SS_DISABLE;
2533 
2534 	return on_sig_stack(sp) ? SS_ONSTACK : 0;
2535 }
2536 
2537 static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2538 {
2539 	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2540 #ifdef CONFIG_STACK_GROWSUP
2541 		return current->sas_ss_sp;
2542 #else
2543 		return current->sas_ss_sp + current->sas_ss_size;
2544 #endif
2545 	return sp;
2546 }
2547 
2548 /*
2549  * Routines for handling mm_structs
2550  */
2551 extern struct mm_struct * mm_alloc(void);
2552 
2553 /* mmdrop drops the mm and the page tables */
2554 extern void __mmdrop(struct mm_struct *);
2555 static inline void mmdrop(struct mm_struct * mm)
2556 {
2557 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2558 		__mmdrop(mm);
2559 }
2560 
2561 /* mmput gets rid of the mappings and all user-space */
2562 extern void mmput(struct mm_struct *);
2563 /* Grab a reference to a task's mm, if it is not already going away */
2564 extern struct mm_struct *get_task_mm(struct task_struct *task);
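
/*
 * A minimal sketch of the usual pairing with mmput() (@p and the body of
 * the critical section are placeholders): the returned mm, if any, stays
 * pinned until mmput() is called.
 *
 *	struct mm_struct *mm = get_task_mm(p);
 *
 *	if (mm) {
 *		...			// safe to inspect mm here
 *		mmput(mm);
 *	}
 */
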
2565 /*
2566  * Grab a reference to a task's mm, if it is not already going away
2567  * and ptrace_may_access with the mode parameter passed to it
2568  * succeeds.
2569  */
2570 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
2572 extern void mm_release(struct task_struct *, struct mm_struct *);
2573 
2574 #ifdef CONFIG_HAVE_COPY_THREAD_TLS
2575 extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
2576 			struct task_struct *, unsigned long);
2577 #else
2578 extern int copy_thread(unsigned long, unsigned long, unsigned long,
2579 			struct task_struct *);
2580 
2581 /* Architectures that haven't opted into copy_thread_tls get the tls argument
2582  * via pt_regs, so ignore the tls argument passed via C. */
2583 static inline int copy_thread_tls(
2584 		unsigned long clone_flags, unsigned long sp, unsigned long arg,
2585 		struct task_struct *p, unsigned long tls)
2586 {
2587 	return copy_thread(clone_flags, sp, arg, p);
2588 }
2589 #endif
2590 extern void flush_thread(void);
2591 extern void exit_thread(void);
2592 
2593 extern void exit_files(struct task_struct *);
2594 extern void __cleanup_sighand(struct sighand_struct *);
2595 
2596 extern void exit_itimers(struct signal_struct *);
2597 extern void flush_itimer_signals(void);
2598 
2599 extern void do_group_exit(int);
2600 
2601 extern int do_execve(struct filename *,
2602 		     const char __user * const __user *,
2603 		     const char __user * const __user *);
2604 extern int do_execveat(int, struct filename *,
2605 		       const char __user * const __user *,
2606 		       const char __user * const __user *,
2607 		       int);
2608 extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
2609 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2610 struct task_struct *fork_idle(int);
2611 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2612 
2613 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
2614 static inline void set_task_comm(struct task_struct *tsk, const char *from)
2615 {
2616 	__set_task_comm(tsk, from, false);
2617 }
2618 extern char *get_task_comm(char *to, struct task_struct *tsk);
2619 
2620 #ifdef CONFIG_SMP
2621 void scheduler_ipi(void);
2622 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2623 #else
2624 static inline void scheduler_ipi(void) { }
2625 static inline unsigned long wait_task_inactive(struct task_struct *p,
2626 					       long match_state)
2627 {
2628 	return 1;
2629 }
2630 #endif
2631 
2632 #define tasklist_empty() \
2633 	list_empty(&init_task.tasks)
2634 
2635 #define next_task(p) \
2636 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2637 
2638 #define for_each_process(p) \
2639 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2640 
2641 extern bool current_is_single_threaded(void);
2642 
2643 /*
2644  * Careful: do_each_thread/while_each_thread is a double loop so
2645  *          'break' will not work as expected - use goto instead.
2646  */
2647 #define do_each_thread(g, t) \
2648 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2649 
2650 #define while_each_thread(g, t) \
2651 	while ((t = next_thread(t)) != g)
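
/*
 * A sketch of the caveat above (some_condition() is hypothetical): since
 * the construct expands to two nested loops, leaving it early needs a goto.
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		if (some_condition(t))
 *			goto out;	// 'break' would only leave the inner loop
 *	} while_each_thread(g, t);
 * out:
 *	read_unlock(&tasklist_lock);
 */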
2652 
2653 #define __for_each_thread(signal, t)	\
2654 	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2655 
2656 #define for_each_thread(p, t)		\
2657 	__for_each_thread((p)->signal, t)
2658 
2659 /* Careful: this is a double loop, 'break' won't work as expected. */
2660 #define for_each_process_thread(p, t)	\
2661 	for_each_process(p) for_each_thread(p, t)
2662 
2663 static inline int get_nr_threads(struct task_struct *tsk)
2664 {
2665 	return tsk->signal->nr_threads;
2666 }
2667 
2668 static inline bool thread_group_leader(struct task_struct *p)
2669 {
2670 	return p->exit_signal >= 0;
2671 }
2672 
/* Due to the insanities of de_thread it is possible for a process
2674  * to have the pid of the thread group leader without actually being
2675  * the thread group leader.  For iteration through the pids in proc
2676  * all we care about is that we have a task with the appropriate
2677  * pid, we don't actually care if we have the right task.
2678  */
2679 static inline bool has_group_leader_pid(struct task_struct *p)
2680 {
2681 	return task_pid(p) == p->signal->leader_pid;
2682 }
2683 
2684 static inline
2685 bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2686 {
2687 	return p1->signal == p2->signal;
2688 }
2689 
2690 static inline struct task_struct *next_thread(const struct task_struct *p)
2691 {
2692 	return list_entry_rcu(p->thread_group.next,
2693 			      struct task_struct, thread_group);
2694 }
2695 
2696 static inline int thread_group_empty(struct task_struct *p)
2697 {
2698 	return list_empty(&p->thread_group);
2699 }
2700 
2701 #define delay_group_leader(p) \
2702 		(thread_group_leader(p) && !thread_group_empty(p))
2703 
2704 /*
2705  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2706  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2707  * pins the final release of task.io_context.  Also protects ->cpuset and
2708  * ->cgroup.subsys[]. And ->vfork_done.
2709  *
2710  * Nests both inside and outside of read_lock(&tasklist_lock).
2711  * It must not be nested with write_lock_irq(&tasklist_lock),
2712  * neither inside nor outside.
2713  */
2714 static inline void task_lock(struct task_struct *p)
2715 {
2716 	spin_lock(&p->alloc_lock);
2717 }
2718 
2719 static inline void task_unlock(struct task_struct *p)
2720 {
2721 	spin_unlock(&p->alloc_lock);
2722 }
2723 
2724 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2725 							unsigned long *flags);
2726 
2727 static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2728 						       unsigned long *flags)
2729 {
2730 	struct sighand_struct *ret;
2731 
2732 	ret = __lock_task_sighand(tsk, flags);
2733 	(void)__cond_lock(&tsk->sighand->siglock, ret);
2734 	return ret;
2735 }
2736 
2737 static inline void unlock_task_sighand(struct task_struct *tsk,
2738 						unsigned long *flags)
2739 {
2740 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2741 }
2742 
2743 /**
2744  * threadgroup_change_begin - mark the beginning of changes to a threadgroup
2745  * @tsk: task causing the changes
2746  *
2747  * All operations which modify a threadgroup - a new thread joining the
2748  * group, death of a member thread (the assertion of PF_EXITING) and
2749  * exec(2) dethreading the process and replacing the leader - are wrapped
2750  * by threadgroup_change_{begin|end}().  This is to provide a place which
2751  * subsystems needing threadgroup stability can hook into for
2752  * synchronization.
2753  */
2754 static inline void threadgroup_change_begin(struct task_struct *tsk)
2755 {
2756 	might_sleep();
2757 	cgroup_threadgroup_change_begin(tsk);
2758 }
2759 
2760 /**
2761  * threadgroup_change_end - mark the end of changes to a threadgroup
2762  * @tsk: task causing the changes
2763  *
2764  * See threadgroup_change_begin().
2765  */
2766 static inline void threadgroup_change_end(struct task_struct *tsk)
2767 {
2768 	cgroup_threadgroup_change_end(tsk);
2769 }
2770 
2771 #ifndef __HAVE_THREAD_FUNCTIONS
2772 
2773 #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2774 #define task_stack_page(task)	((task)->stack)
2775 
2776 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2777 {
2778 	*task_thread_info(p) = *task_thread_info(org);
2779 	task_thread_info(p)->task = p;
2780 }
2781 
2782 /*
2783  * Return the address of the last usable long on the stack.
2784  *
2785  * When the stack grows down, this is just above the thread
2786  * info struct. Going any lower will corrupt the threadinfo.
2787  *
2788  * When the stack grows up, this is the highest address.
2789  * Beyond that position, we corrupt data on the next page.
2790  */
2791 static inline unsigned long *end_of_stack(struct task_struct *p)
2792 {
2793 #ifdef CONFIG_STACK_GROWSUP
2794 	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
2795 #else
2796 	return (unsigned long *)(task_thread_info(p) + 1);
2797 #endif
2798 }
2799 
2800 #endif
2801 #define task_stack_end_corrupted(task) \
2802 		(*(end_of_stack(task)) != STACK_END_MAGIC)
2803 
2804 static inline int object_is_on_stack(void *obj)
2805 {
2806 	void *stack = task_stack_page(current);
2807 
2808 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2809 }
2810 
2811 extern void thread_info_cache_init(void);
2812 
2813 #ifdef CONFIG_DEBUG_STACK_USAGE
2814 static inline unsigned long stack_not_used(struct task_struct *p)
2815 {
2816 	unsigned long *n = end_of_stack(p);
2817 
2818 	do { 	/* Skip over canary */
2819 		n++;
2820 	} while (!*n);
2821 
2822 	return (unsigned long)n - (unsigned long)end_of_stack(p);
2823 }
2824 #endif
2825 extern void set_task_stack_end_magic(struct task_struct *tsk);
2826 
2827 /* set thread flags in other task's structures
2828  * - see asm/thread_info.h for TIF_xxxx flags available
2829  */
2830 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2831 {
2832 	set_ti_thread_flag(task_thread_info(tsk), flag);
2833 }
2834 
2835 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2836 {
2837 	clear_ti_thread_flag(task_thread_info(tsk), flag);
2838 }
2839 
2840 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2841 {
2842 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2843 }
2844 
2845 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2846 {
2847 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2848 }
2849 
2850 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2851 {
2852 	return test_ti_thread_flag(task_thread_info(tsk), flag);
2853 }
2854 
2855 static inline void set_tsk_need_resched(struct task_struct *tsk)
2856 {
2857 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2858 }
2859 
2860 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2861 {
2862 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2863 }
2864 
2865 static inline int test_tsk_need_resched(struct task_struct *tsk)
2866 {
2867 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2868 }
2869 
2870 static inline int restart_syscall(void)
2871 {
2872 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2873 	return -ERESTARTNOINTR;
2874 }
2875 
2876 static inline int signal_pending(struct task_struct *p)
2877 {
2878 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2879 }
2880 
2881 static inline int __fatal_signal_pending(struct task_struct *p)
2882 {
2883 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2884 }
2885 
2886 static inline int fatal_signal_pending(struct task_struct *p)
2887 {
2888 	return signal_pending(p) && __fatal_signal_pending(p);
2889 }
2890 
2891 static inline int signal_pending_state(long state, struct task_struct *p)
2892 {
2893 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2894 		return 0;
2895 	if (!signal_pending(p))
2896 		return 0;
2897 
2898 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2899 }
2900 
2901 /*
2902  * cond_resched() and cond_resched_lock(): latency reduction via
2903  * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was in fact done.
2905  * cond_resched_lock() will drop the spinlock before scheduling,
2906  * cond_resched_softirq() will enable bhs before scheduling.
2907  */
2908 extern int _cond_resched(void);
2909 
2910 #define cond_resched() ({			\
2911 	___might_sleep(__FILE__, __LINE__, 0);	\
2912 	_cond_resched();			\
2913 })
2914 
2915 extern int __cond_resched_lock(spinlock_t *lock);
2916 
2917 #define cond_resched_lock(lock) ({				\
2918 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
2919 	__cond_resched_lock(lock);				\
2920 })
2921 
2922 extern int __cond_resched_softirq(void);
2923 
2924 #define cond_resched_softirq() ({					\
2925 	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2926 	__cond_resched_softirq();					\
2927 })
2928 
2929 static inline void cond_resched_rcu(void)
2930 {
2931 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2932 	rcu_read_unlock();
2933 	cond_resched();
2934 	rcu_read_lock();
2935 #endif
2936 }
2937 
2938 /*
2939  * Does a critical section need to be broken due to another
 * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
 * but reflects a general need for low latency.)
2942  */
2943 static inline int spin_needbreak(spinlock_t *lock)
2944 {
2945 #ifdef CONFIG_PREEMPT
2946 	return spin_is_contended(lock);
2947 #else
2948 	return 0;
2949 #endif
2950 }
2951 
2952 /*
2953  * Idle thread specific functions to determine the need_resched
2954  * polling state.
2955  */
2956 #ifdef TIF_POLLING_NRFLAG
2957 static inline int tsk_is_polling(struct task_struct *p)
2958 {
2959 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2960 }
2961 
2962 static inline void __current_set_polling(void)
2963 {
2964 	set_thread_flag(TIF_POLLING_NRFLAG);
2965 }
2966 
2967 static inline bool __must_check current_set_polling_and_test(void)
2968 {
2969 	__current_set_polling();
2970 
2971 	/*
2972 	 * Polling state must be visible before we test NEED_RESCHED,
2973 	 * paired by resched_curr()
2974 	 */
2975 	smp_mb__after_atomic();
2976 
2977 	return unlikely(tif_need_resched());
2978 }
2979 
2980 static inline void __current_clr_polling(void)
2981 {
2982 	clear_thread_flag(TIF_POLLING_NRFLAG);
2983 }
2984 
2985 static inline bool __must_check current_clr_polling_and_test(void)
2986 {
2987 	__current_clr_polling();
2988 
2989 	/*
2990 	 * Polling state must be visible before we test NEED_RESCHED,
2991 	 * paired by resched_curr()
2992 	 */
2993 	smp_mb__after_atomic();
2994 
2995 	return unlikely(tif_need_resched());
2996 }
2997 
2998 #else
2999 static inline int tsk_is_polling(struct task_struct *p) { return 0; }
3000 static inline void __current_set_polling(void) { }
3001 static inline void __current_clr_polling(void) { }
3002 
3003 static inline bool __must_check current_set_polling_and_test(void)
3004 {
3005 	return unlikely(tif_need_resched());
3006 }
3007 static inline bool __must_check current_clr_polling_and_test(void)
3008 {
3009 	return unlikely(tif_need_resched());
3010 }
3011 #endif
3012 
3013 static inline void current_clr_polling(void)
3014 {
3015 	__current_clr_polling();
3016 
3017 	/*
3018 	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
3019 	 * Once the bit is cleared, we'll get IPIs with every new
3020 	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
3021 	 * fold.
3022 	 */
3023 	smp_mb(); /* paired with resched_curr() */
3024 
3025 	preempt_fold_need_resched();
3026 }
3027 
3028 static __always_inline bool need_resched(void)
3029 {
3030 	return unlikely(tif_need_resched());
3031 }
3032 
3033 /*
3034  * Thread group CPU time accounting.
3035  */
3036 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
3037 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
3038 
3039 /*
3040  * Reevaluate whether the task has signals pending delivery.
3041  * Wake the task if so.
3042  * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
3044  */
3045 extern void recalc_sigpending_and_wake(struct task_struct *t);
3046 extern void recalc_sigpending(void);
3047 
3048 extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
3049 
3050 static inline void signal_wake_up(struct task_struct *t, bool resume)
3051 {
3052 	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
3053 }
3054 static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
3055 {
3056 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
3057 }
3058 
3059 /*
3060  * Wrappers for p->thread_info->cpu access. No-op on UP.
3061  */
3062 #ifdef CONFIG_SMP
3063 
3064 static inline unsigned int task_cpu(const struct task_struct *p)
3065 {
3066 	return task_thread_info(p)->cpu;
3067 }
3068 
3069 static inline int task_node(const struct task_struct *p)
3070 {
3071 	return cpu_to_node(task_cpu(p));
3072 }
3073 
3074 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
3075 
3076 #else
3077 
3078 static inline unsigned int task_cpu(const struct task_struct *p)
3079 {
3080 	return 0;
3081 }
3082 
3083 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
3084 {
3085 }
3086 
3087 #endif /* CONFIG_SMP */
3088 
3089 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
3090 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
3091 
3092 #ifdef CONFIG_CGROUP_SCHED
3093 extern struct task_group root_task_group;
3094 #endif /* CONFIG_CGROUP_SCHED */
3095 
3096 extern int task_can_switch_user(struct user_struct *up,
3097 					struct task_struct *tsk);
3098 
3099 #ifdef CONFIG_TASK_XACCT
3100 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3101 {
3102 	tsk->ioac.rchar += amt;
3103 }
3104 
3105 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3106 {
3107 	tsk->ioac.wchar += amt;
3108 }
3109 
3110 static inline void inc_syscr(struct task_struct *tsk)
3111 {
3112 	tsk->ioac.syscr++;
3113 }
3114 
3115 static inline void inc_syscw(struct task_struct *tsk)
3116 {
3117 	tsk->ioac.syscw++;
3118 }
3119 #else
3120 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3121 {
3122 }
3123 
3124 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3125 {
3126 }
3127 
3128 static inline void inc_syscr(struct task_struct *tsk)
3129 {
3130 }
3131 
3132 static inline void inc_syscw(struct task_struct *tsk)
3133 {
3134 }
3135 #endif
3136 
3137 #ifndef TASK_SIZE_OF
3138 #define TASK_SIZE_OF(tsk)	TASK_SIZE
3139 #endif
3140 
3141 #ifdef CONFIG_MEMCG
3142 extern void mm_update_next_owner(struct mm_struct *mm);
3143 #else
3144 static inline void mm_update_next_owner(struct mm_struct *mm)
3145 {
3146 }
3147 #endif /* CONFIG_MEMCG */
3148 
3149 static inline unsigned long task_rlimit(const struct task_struct *tsk,
3150 		unsigned int limit)
3151 {
3152 	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
3153 }
3154 
3155 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
3156 		unsigned int limit)
3157 {
3158 	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
3159 }
3160 
3161 static inline unsigned long rlimit(unsigned int limit)
3162 {
3163 	return task_rlimit(current, limit);
3164 }
3165 
3166 static inline unsigned long rlimit_max(unsigned int limit)
3167 {
3168 	return task_rlimit_max(current, limit);
3169 }
3170 
3171 #endif
3172