xref: /linux/include/linux/sched.h (revision b6ebbac51bedf9e98e837688bc838f400196da5e)
1 #ifndef _LINUX_SCHED_H
2 #define _LINUX_SCHED_H
3 
4 #include <uapi/linux/sched.h>
5 
6 #include <linux/sched/prio.h>
7 
8 
9 struct sched_param {
10 	int sched_priority;
11 };
12 
13 #include <asm/param.h>	/* for HZ */
14 
15 #include <linux/capability.h>
16 #include <linux/threads.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/timex.h>
20 #include <linux/jiffies.h>
21 #include <linux/plist.h>
22 #include <linux/rbtree.h>
23 #include <linux/thread_info.h>
24 #include <linux/cpumask.h>
25 #include <linux/errno.h>
26 #include <linux/nodemask.h>
27 #include <linux/mm_types.h>
28 #include <linux/preempt.h>
29 
30 #include <asm/page.h>
31 #include <asm/ptrace.h>
32 #include <linux/cputime.h>
33 
34 #include <linux/smp.h>
35 #include <linux/sem.h>
36 #include <linux/shm.h>
37 #include <linux/signal.h>
38 #include <linux/compiler.h>
39 #include <linux/completion.h>
40 #include <linux/pid.h>
41 #include <linux/percpu.h>
42 #include <linux/topology.h>
43 #include <linux/seccomp.h>
44 #include <linux/rcupdate.h>
45 #include <linux/rculist.h>
46 #include <linux/rtmutex.h>
47 
48 #include <linux/time.h>
49 #include <linux/param.h>
50 #include <linux/resource.h>
51 #include <linux/timer.h>
52 #include <linux/hrtimer.h>
53 #include <linux/kcov.h>
54 #include <linux/task_io_accounting.h>
55 #include <linux/latencytop.h>
56 #include <linux/cred.h>
57 #include <linux/llist.h>
58 #include <linux/uidgid.h>
59 #include <linux/gfp.h>
60 #include <linux/magic.h>
61 #include <linux/cgroup-defs.h>
62 
63 #include <asm/processor.h>
64 
65 #define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */
66 
67 /*
68  * Extended scheduling parameters data structure.
69  *
70  * This is needed because the original struct sched_param cannot be
71  * altered without introducing ABI issues with legacy applications
72  * (e.g., in sched_getparam()).
73  *
74  * However, the possibility of specifying more than just a priority for
75  * the tasks may be useful for a wide variety of application fields, e.g.,
76  * multimedia, streaming, automation and control, and many others.
77  *
78  * This variant (sched_attr) is meant to describe a so-called
79  * sporadic time-constrained task. In such a model a task is specified by:
80  *  - the activation period or minimum instance inter-arrival time;
81  *  - the maximum (or average, depending on the actual scheduling
82  *    discipline) computation time of all instances, a.k.a. runtime;
83  *  - the deadline (relative to the actual activation time) of each
84  *    instance.
85  * Very briefly, a periodic (sporadic) task asks for the execution of
86  * some specific computation --which is typically called an instance--
87  * (at most) every period. Moreover, each instance typically lasts no more
88  * than the runtime and must be completed by time instant t equal to
89  * the instance activation time + the deadline.
90  *
91  * This is reflected by the actual fields of the sched_attr structure:
92  *
93  *  @size		size of the structure, for fwd/bwd compat.
94  *
95  *  @sched_policy	task's scheduling policy
96  *  @sched_flags	for customizing the scheduler behaviour
97  *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
98  *  @sched_priority	task's static priority (SCHED_FIFO/RR)
99  *  @sched_deadline	representative of the task's deadline
100  *  @sched_runtime	representative of the task's runtime
101  *  @sched_period	representative of the task's period
102  *
103  * Given this task model, a multiplicity of scheduling algorithms and
104  * policies can be used to ensure that all the tasks will meet their
105  * timing constraints.
106  *
107  * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
108  * only user of this new interface. More information about the algorithm
109  * is available in the scheduling class file or in Documentation/.
110  */
111 struct sched_attr {
112 	u32 size;
113 
114 	u32 sched_policy;
115 	u64 sched_flags;
116 
117 	/* SCHED_NORMAL, SCHED_BATCH */
118 	s32 sched_nice;
119 
120 	/* SCHED_FIFO, SCHED_RR */
121 	u32 sched_priority;
122 
123 	/* SCHED_DEADLINE */
124 	u64 sched_runtime;
125 	u64 sched_deadline;
126 	u64 sched_period;
127 };
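
/*
 * Purely illustrative sketch (not part of this header): a SCHED_DEADLINE
 * task with a 10ms runtime budget out of every 100ms period, due 30ms
 * after each activation, could be described as
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * NSEC_PER_MSEC,
 *		.sched_deadline	=  30 * NSEC_PER_MSEC,
 *		.sched_period	= 100 * NSEC_PER_MSEC,
 *	};
 *
 * and handed to sched_setattr(). The three times are in nanoseconds and
 * the numbers above are example values only.
 */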
128 
129 struct futex_pi_state;
130 struct robust_list_head;
131 struct bio_list;
132 struct fs_struct;
133 struct perf_event_context;
134 struct blk_plug;
135 struct filename;
136 struct nameidata;
137 
138 #define VMACACHE_BITS 2
139 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
140 #define VMACACHE_MASK (VMACACHE_SIZE - 1)
141 
142 /*
143  * These are the constants used to fake the fixed-point load-average
144  * counting. Some notes:
145  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
146  *    a load-average precision of 10 bits integer + 11 bits fractional
147  *  - if you want to count load-averages more often, you need more
148  *    precision, or rounding will get you. With 2-second counting freq,
149  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
150  *    11 bit fractions.
151  */
152 extern unsigned long avenrun[];		/* Load averages */
153 extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
154 
155 #define FSHIFT		11		/* nr of bits of precision */
156 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
157 #define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
158 #define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
159 #define EXP_5		2014		/* 1/exp(5sec/5min) */
160 #define EXP_15		2037		/* 1/exp(5sec/15min) */
161 
162 #define CALC_LOAD(load,exp,n) \
163 	load *= exp; \
164 	load += n*(FIXED_1-exp); \
165 	load >>= FSHIFT;
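
/*
 * A rough worked example of the fixed-point update above: with
 * FIXED_1 = 2048 and EXP_1 = 1884, a previous 1-minute load of 1.0
 * (load == 2048) and n == 0 runnable tasks decays in one LOAD_FREQ
 * interval to
 *
 *	load = (2048 * 1884 + 0 * (2048 - 1884)) >> 11 = 1884
 *
 * i.e. roughly 0.92 == 1/exp(5sec/1min), as intended.
 */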
166 
167 extern unsigned long total_forks;
168 extern int nr_threads;
169 DECLARE_PER_CPU(unsigned long, process_counts);
170 extern int nr_processes(void);
171 extern unsigned long nr_running(void);
172 extern bool single_task_running(void);
173 extern unsigned long nr_iowait(void);
174 extern unsigned long nr_iowait_cpu(int cpu);
175 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
176 
177 extern void calc_global_load(unsigned long ticks);
178 
179 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
180 extern void cpu_load_update_nohz_start(void);
181 extern void cpu_load_update_nohz_stop(void);
182 #else
183 static inline void cpu_load_update_nohz_start(void) { }
184 static inline void cpu_load_update_nohz_stop(void) { }
185 #endif
186 
187 extern void dump_cpu_task(int cpu);
188 
189 struct seq_file;
190 struct cfs_rq;
191 struct task_group;
192 #ifdef CONFIG_SCHED_DEBUG
193 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
194 extern void proc_sched_set_task(struct task_struct *p);
195 #endif
196 
197 /*
198  * Task state bitmask. NOTE! These bits are also
199  * encoded in fs/proc/array.c: get_task_state().
200  *
201  * We have two separate sets of flags: task->state
202  * is about runnability, while task->exit_state are
203  * about the task exiting. Confusing, but this way
204  * modifying one set can't modify the other one by
205  * mistake.
206  */
207 #define TASK_RUNNING		0
208 #define TASK_INTERRUPTIBLE	1
209 #define TASK_UNINTERRUPTIBLE	2
210 #define __TASK_STOPPED		4
211 #define __TASK_TRACED		8
212 /* in tsk->exit_state */
213 #define EXIT_DEAD		16
214 #define EXIT_ZOMBIE		32
215 #define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
216 /* in tsk->state again */
217 #define TASK_DEAD		64
218 #define TASK_WAKEKILL		128
219 #define TASK_WAKING		256
220 #define TASK_PARKED		512
221 #define TASK_NOLOAD		1024
222 #define TASK_NEW		2048
223 #define TASK_STATE_MAX		4096
224 
225 #define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
226 
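/*
 * Build-time sanity check: the array size below goes negative (and the
 * build fails) unless TASK_STATE_TO_CHAR_STR has exactly one character
 * for each task state defined above.
 */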
227 extern char ___assert_task_state[1 - 2*!!(
228 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
229 
230 /* Convenience macros for the sake of set_task_state */
231 #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
232 #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
233 #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
234 
235 #define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
236 
237 /* Convenience macros for the sake of wake_up */
238 #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
239 #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
240 
241 /* get_task_state() */
242 #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
243 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
244 				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
245 
246 #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
247 #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
248 #define task_is_stopped_or_traced(task)	\
249 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
250 #define task_contributes_to_load(task)	\
251 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
252 				 (task->flags & PF_FROZEN) == 0 && \
253 				 (task->state & TASK_NOLOAD) == 0)
254 
255 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
256 
257 #define __set_task_state(tsk, state_value)			\
258 	do {							\
259 		(tsk)->task_state_change = _THIS_IP_;		\
260 		(tsk)->state = (state_value);			\
261 	} while (0)
262 #define set_task_state(tsk, state_value)			\
263 	do {							\
264 		(tsk)->task_state_change = _THIS_IP_;		\
265 		smp_store_mb((tsk)->state, (state_value));		\
266 	} while (0)
267 
268 /*
269  * set_current_state() includes a barrier so that the write of current->state
270  * is correctly serialised wrt the caller's subsequent test of whether to
271  * actually sleep:
272  *
273  *	set_current_state(TASK_UNINTERRUPTIBLE);
274  *	if (do_i_need_to_sleep())
275  *		schedule();
276  *
277  * If the caller does not need such serialisation then use __set_current_state()
278  */
279 #define __set_current_state(state_value)			\
280 	do {							\
281 		current->task_state_change = _THIS_IP_;		\
282 		current->state = (state_value);			\
283 	} while (0)
284 #define set_current_state(state_value)				\
285 	do {							\
286 		current->task_state_change = _THIS_IP_;		\
287 		smp_store_mb(current->state, (state_value));		\
288 	} while (0)
289 
290 #else
291 
292 #define __set_task_state(tsk, state_value)		\
293 	do { (tsk)->state = (state_value); } while (0)
294 #define set_task_state(tsk, state_value)		\
295 	smp_store_mb((tsk)->state, (state_value))
296 
297 /*
298  * set_current_state() includes a barrier so that the write of current->state
299  * is correctly serialised wrt the caller's subsequent test of whether to
300  * actually sleep:
301  *
302  *	set_current_state(TASK_UNINTERRUPTIBLE);
303  *	if (do_i_need_to_sleep())
304  *		schedule();
305  *
306  * If the caller does not need such serialisation then use __set_current_state()
307  */
308 #define __set_current_state(state_value)		\
309 	do { current->state = (state_value); } while (0)
310 #define set_current_state(state_value)			\
311 	smp_store_mb(current->state, (state_value))
312 
313 #endif
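
/*
 * The usual pattern built on the helpers above is a sleep loop (sketch
 * only; another context performs the matching wake_up_process()):
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */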
314 
315 /* Task command name length */
316 #define TASK_COMM_LEN 16
317 
318 #include <linux/spinlock.h>
319 
320 /*
321  * This serializes "schedule()" and also protects
322  * the run-queue from deletions/modifications (but
323  * _adding_ to the beginning of the run-queue has
324  * a separate lock).
325  */
326 extern rwlock_t tasklist_lock;
327 extern spinlock_t mmlist_lock;
328 
329 struct task_struct;
330 
331 #ifdef CONFIG_PROVE_RCU
332 extern int lockdep_tasklist_lock_is_held(void);
333 #endif /* #ifdef CONFIG_PROVE_RCU */
334 
335 extern void sched_init(void);
336 extern void sched_init_smp(void);
337 extern asmlinkage void schedule_tail(struct task_struct *prev);
338 extern void init_idle(struct task_struct *idle, int cpu);
339 extern void init_idle_bootup_task(struct task_struct *idle);
340 
341 extern cpumask_var_t cpu_isolated_map;
342 
343 extern int runqueue_is_locked(int cpu);
344 
345 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
346 extern void nohz_balance_enter_idle(int cpu);
347 extern void set_cpu_sd_state_idle(void);
348 extern int get_nohz_timer_target(void);
349 #else
350 static inline void nohz_balance_enter_idle(int cpu) { }
351 static inline void set_cpu_sd_state_idle(void) { }
352 #endif
353 
354 /*
355  * Only dump TASK_* tasks. (0 for all tasks)
356  */
357 extern void show_state_filter(unsigned long state_filter);
358 
359 static inline void show_state(void)
360 {
361 	show_state_filter(0);
362 }
363 
364 extern void show_regs(struct pt_regs *);
365 
366 /*
367  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
368  * task), SP is the stack pointer of the first frame that should be shown in the back
369  * trace (or NULL if the entire call-chain of the task should be shown).
370  */
371 extern void show_stack(struct task_struct *task, unsigned long *sp);
372 
373 extern void cpu_init (void);
374 extern void trap_init(void);
375 extern void update_process_times(int user);
376 extern void scheduler_tick(void);
377 extern int sched_cpu_starting(unsigned int cpu);
378 extern int sched_cpu_activate(unsigned int cpu);
379 extern int sched_cpu_deactivate(unsigned int cpu);
380 
381 #ifdef CONFIG_HOTPLUG_CPU
382 extern int sched_cpu_dying(unsigned int cpu);
383 #else
384 # define sched_cpu_dying	NULL
385 #endif
386 
387 extern void sched_show_task(struct task_struct *p);
388 
389 #ifdef CONFIG_LOCKUP_DETECTOR
390 extern void touch_softlockup_watchdog_sched(void);
391 extern void touch_softlockup_watchdog(void);
392 extern void touch_softlockup_watchdog_sync(void);
393 extern void touch_all_softlockup_watchdogs(void);
394 extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
395 				  void __user *buffer,
396 				  size_t *lenp, loff_t *ppos);
397 extern unsigned int  softlockup_panic;
398 extern unsigned int  hardlockup_panic;
399 void lockup_detector_init(void);
400 #else
401 static inline void touch_softlockup_watchdog_sched(void)
402 {
403 }
404 static inline void touch_softlockup_watchdog(void)
405 {
406 }
407 static inline void touch_softlockup_watchdog_sync(void)
408 {
409 }
410 static inline void touch_all_softlockup_watchdogs(void)
411 {
412 }
413 static inline void lockup_detector_init(void)
414 {
415 }
416 #endif
417 
418 #ifdef CONFIG_DETECT_HUNG_TASK
419 void reset_hung_task_detector(void);
420 #else
421 static inline void reset_hung_task_detector(void)
422 {
423 }
424 #endif
425 
426 /* Attach to any functions which should be ignored in wchan output. */
427 #define __sched		__attribute__((__section__(".sched.text")))
428 
429 /* Linker adds these: start and end of __sched functions */
430 extern char __sched_text_start[], __sched_text_end[];
431 
432 /* Is this address in the __sched functions? */
433 extern int in_sched_functions(unsigned long addr);
434 
435 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
436 extern signed long schedule_timeout(signed long timeout);
437 extern signed long schedule_timeout_interruptible(signed long timeout);
438 extern signed long schedule_timeout_killable(signed long timeout);
439 extern signed long schedule_timeout_uninterruptible(signed long timeout);
440 extern signed long schedule_timeout_idle(signed long timeout);
441 asmlinkage void schedule(void);
442 extern void schedule_preempt_disabled(void);
443 
444 extern long io_schedule_timeout(long timeout);
445 
446 static inline void io_schedule(void)
447 {
448 	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
449 }
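
/*
 * schedule_timeout() and friends expect the caller to set the task state
 * first; a minimal sketch of sleeping for (at most) about one second:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * schedule_timeout_interruptible(HZ) combines both steps, and a signal
 * (or an earlier wakeup) ends the sleep with the remaining jiffies
 * returned.
 */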
450 
451 struct nsproxy;
452 struct user_namespace;
453 
454 #ifdef CONFIG_MMU
455 extern void arch_pick_mmap_layout(struct mm_struct *mm);
456 extern unsigned long
457 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
458 		       unsigned long, unsigned long);
459 extern unsigned long
460 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
461 			  unsigned long len, unsigned long pgoff,
462 			  unsigned long flags);
463 #else
464 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
465 #endif
466 
467 #define SUID_DUMP_DISABLE	0	/* No setuid dumping */
468 #define SUID_DUMP_USER		1	/* Dump as user of process */
469 #define SUID_DUMP_ROOT		2	/* Dump as root */
470 
471 /* mm flags */
472 
473 /* for SUID_DUMP_* above */
474 #define MMF_DUMPABLE_BITS 2
475 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
476 
477 extern void set_dumpable(struct mm_struct *mm, int value);
478 /*
479  * This returns the actual value of the suid_dumpable flag. For things
480  * that are using this for checking for privilege transitions, it must
481  * test against SUID_DUMP_USER rather than treating it as a boolean
482  * value.
483  */
484 static inline int __get_dumpable(unsigned long mm_flags)
485 {
486 	return mm_flags & MMF_DUMPABLE_MASK;
487 }
488 
489 static inline int get_dumpable(struct mm_struct *mm)
490 {
491 	return __get_dumpable(mm->flags);
492 }
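
/*
 * Per the note above, privilege-transition checks should compare against
 * SUID_DUMP_USER explicitly, e.g. (sketch):
 *
 *	if (get_dumpable(mm) != SUID_DUMP_USER)
 *		return -EPERM;
 *
 * rather than treating the result as a boolean, since SUID_DUMP_ROOT is
 * non-zero but still a restricted state.
 */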
493 
494 /* coredump filter bits */
495 #define MMF_DUMP_ANON_PRIVATE	2
496 #define MMF_DUMP_ANON_SHARED	3
497 #define MMF_DUMP_MAPPED_PRIVATE	4
498 #define MMF_DUMP_MAPPED_SHARED	5
499 #define MMF_DUMP_ELF_HEADERS	6
500 #define MMF_DUMP_HUGETLB_PRIVATE 7
501 #define MMF_DUMP_HUGETLB_SHARED  8
502 #define MMF_DUMP_DAX_PRIVATE	9
503 #define MMF_DUMP_DAX_SHARED	10
504 
505 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
506 #define MMF_DUMP_FILTER_BITS	9
507 #define MMF_DUMP_FILTER_MASK \
508 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
509 #define MMF_DUMP_FILTER_DEFAULT \
510 	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
511 	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
512 
513 #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
514 # define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
515 #else
516 # define MMF_DUMP_MASK_DEFAULT_ELF	0
517 #endif
518 					/* leave room for more dump flags */
519 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
520 #define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
521 #define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */
522 
523 #define MMF_HAS_UPROBES		19	/* has uprobes */
524 #define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
525 #define MMF_OOM_REAPED		21	/* mm has been already reaped */
526 #define MMF_OOM_NOT_REAPABLE	22	/* mm couldn't be reaped */
527 
528 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
529 
530 struct sighand_struct {
531 	atomic_t		count;
532 	struct k_sigaction	action[_NSIG];
533 	spinlock_t		siglock;
534 	wait_queue_head_t	signalfd_wqh;
535 };
536 
537 struct pacct_struct {
538 	int			ac_flag;
539 	long			ac_exitcode;
540 	unsigned long		ac_mem;
541 	cputime_t		ac_utime, ac_stime;
542 	unsigned long		ac_minflt, ac_majflt;
543 };
544 
545 struct cpu_itimer {
546 	cputime_t expires;
547 	cputime_t incr;
548 	u32 error;
549 	u32 incr_error;
550 };
551 
552 /**
553  * struct prev_cputime - snapshot of system and user cputime
554  * @utime: time spent in user mode
555  * @stime: time spent in system mode
556  * @lock: protects the above two fields
557  *
558  * Stores previous user/system time values such that we can guarantee
559  * monotonicity.
560  */
561 struct prev_cputime {
562 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
563 	cputime_t utime;
564 	cputime_t stime;
565 	raw_spinlock_t lock;
566 #endif
567 };
568 
569 static inline void prev_cputime_init(struct prev_cputime *prev)
570 {
571 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
572 	prev->utime = prev->stime = 0;
573 	raw_spin_lock_init(&prev->lock);
574 #endif
575 }
576 
577 /**
578  * struct task_cputime - collected CPU time counts
579  * @utime:		time spent in user mode, in &cputime_t units
580  * @stime:		time spent in kernel mode, in &cputime_t units
581  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
582  *
583  * This structure groups together three kinds of CPU time that are tracked for
584  * threads and thread groups.  Most things considering CPU time want to group
585  * these counts together and treat all three of them in parallel.
586  */
587 struct task_cputime {
588 	cputime_t utime;
589 	cputime_t stime;
590 	unsigned long long sum_exec_runtime;
591 };
592 
593 /* Alternate field names when used to cache expirations. */
594 #define virt_exp	utime
595 #define prof_exp	stime
596 #define sched_exp	sum_exec_runtime
597 
598 #define INIT_CPUTIME	\
599 	(struct task_cputime) {					\
600 		.utime = 0,					\
601 		.stime = 0,					\
602 		.sum_exec_runtime = 0,				\
603 	}
604 
605 /*
606  * This is the atomic variant of task_cputime, which can be used for
607  * storing and updating task_cputime statistics without locking.
608  */
609 struct task_cputime_atomic {
610 	atomic64_t utime;
611 	atomic64_t stime;
612 	atomic64_t sum_exec_runtime;
613 };
614 
615 #define INIT_CPUTIME_ATOMIC \
616 	(struct task_cputime_atomic) {				\
617 		.utime = ATOMIC64_INIT(0),			\
618 		.stime = ATOMIC64_INIT(0),			\
619 		.sum_exec_runtime = ATOMIC64_INIT(0),		\
620 	}
621 
622 #define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
623 
624 /*
625  * Disable preemption until the scheduler is running -- use an unconditional
626  * value so that it also works on !PREEMPT_COUNT kernels.
627  *
628  * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
629  */
630 #define INIT_PREEMPT_COUNT	PREEMPT_OFFSET
631 
632 /*
633  * Initial preempt_count value; reflects the preempt_count schedule invariant
634  * which states that during context switches:
635  *
636  *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
637  *
638  * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
639  * Note: See finish_task_switch().
640  */
641 #define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
642 
643 /**
644  * struct thread_group_cputimer - thread group interval timer counts
645  * @cputime_atomic:	atomic thread group interval timers.
646  * @running:		true when there are timers running and
647  *			@cputime_atomic receives updates.
648  * @checking_timer:	true when a thread in the group is in the
649  *			process of checking for thread group timers.
650  *
651  * This structure contains the version of task_cputime, above, that is
652  * used for thread group CPU timer calculations.
653  */
654 struct thread_group_cputimer {
655 	struct task_cputime_atomic cputime_atomic;
656 	bool running;
657 	bool checking_timer;
658 };
659 
660 #include <linux/rwsem.h>
661 struct autogroup;
662 
663 /*
664  * NOTE! "signal_struct" does not have its own
665  * locking, because a shared signal_struct always
666  * implies a shared sighand_struct, so locking
667  * sighand_struct is always a proper superset of
668  * the locking of signal_struct.
669  */
670 struct signal_struct {
671 	atomic_t		sigcnt;
672 	atomic_t		live;
673 	int			nr_threads;
674 	atomic_t oom_victims; /* # of TIF_MEMDIE threads in this thread group */
675 	struct list_head	thread_head;
676 
677 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
678 
679 	/* current thread group signal load-balancing target: */
680 	struct task_struct	*curr_target;
681 
682 	/* shared signal handling: */
683 	struct sigpending	shared_pending;
684 
685 	/* thread group exit support */
686 	int			group_exit_code;
687 	/* overloaded:
688 	 * - notify group_exit_task when ->count is equal to notify_count
689 	 * - everyone except group_exit_task is stopped during signal delivery
690 	 *   of fatal signals; group_exit_task processes the signal.
691 	 */
692 	int			notify_count;
693 	struct task_struct	*group_exit_task;
694 
695 	/* thread group stop support, overloads group_exit_code too */
696 	int			group_stop_count;
697 	unsigned int		flags; /* see SIGNAL_* flags below */
698 
699 	/*
700 	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
701 	 * manager, to re-parent orphan (double-forking) child processes
702 	 * to this process instead of 'init'. The service manager is
703 	 * able to receive SIGCHLD signals and is able to investigate
704 	 * the process until it calls wait(). All children of this
705 	 * process will inherit a flag if they should look for a
706 	 * child_subreaper process at exit.
707 	 */
708 	unsigned int		is_child_subreaper:1;
709 	unsigned int		has_child_subreaper:1;
710 
711 	/* POSIX.1b Interval Timers */
712 	int			posix_timer_id;
713 	struct list_head	posix_timers;
714 
715 	/* ITIMER_REAL timer for the process */
716 	struct hrtimer real_timer;
717 	struct pid *leader_pid;
718 	ktime_t it_real_incr;
719 
720 	/*
721 	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
722 	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
723 	 * values are defined to 0 and 1 respectively
724 	 */
725 	struct cpu_itimer it[2];
726 
727 	/*
728 	 * Thread group totals for process CPU timers.
729 	 * See thread_group_cputimer(), et al, for details.
730 	 */
731 	struct thread_group_cputimer cputimer;
732 
733 	/* Earliest-expiration cache. */
734 	struct task_cputime cputime_expires;
735 
736 #ifdef CONFIG_NO_HZ_FULL
737 	atomic_t tick_dep_mask;
738 #endif
739 
740 	struct list_head cpu_timers[3];
741 
742 	struct pid *tty_old_pgrp;
743 
744 	/* boolean value for session group leader */
745 	int leader;
746 
747 	struct tty_struct *tty; /* NULL if no tty */
748 
749 #ifdef CONFIG_SCHED_AUTOGROUP
750 	struct autogroup *autogroup;
751 #endif
752 	/*
753 	 * Cumulative resource counters for dead threads in the group,
754 	 * and for reaped dead child processes forked by this group.
755 	 * Live threads maintain their own counters and add to these
756 	 * in __exit_signal, except for the group leader.
757 	 */
758 	seqlock_t stats_lock;
759 	cputime_t utime, stime, cutime, cstime;
760 	cputime_t gtime;
761 	cputime_t cgtime;
762 	struct prev_cputime prev_cputime;
763 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
764 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
765 	unsigned long inblock, oublock, cinblock, coublock;
766 	unsigned long maxrss, cmaxrss;
767 	struct task_io_accounting ioac;
768 
769 	/*
770 	 * Cumulative ns of scheduled CPU time for dead threads in the
771 	 * group, not including a zombie group leader. (This only differs
772 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
773 	 * other than jiffies.)
774 	 */
775 	unsigned long long sum_sched_runtime;
776 
777 	/*
778 	 * We don't bother to synchronize most readers of this at all,
779 	 * because there is no reader checking a limit that actually needs
780 	 * to get both rlim_cur and rlim_max atomically, and either one
781 	 * alone is a single word that can safely be read normally.
782 	 * getrlimit/setrlimit use task_lock(current->group_leader) to
783 	 * protect this instead of the siglock, because they really
784 	 * have no need to disable irqs.
785 	 */
786 	struct rlimit rlim[RLIM_NLIMITS];
787 
788 #ifdef CONFIG_BSD_PROCESS_ACCT
789 	struct pacct_struct pacct;	/* per-process accounting information */
790 #endif
791 #ifdef CONFIG_TASKSTATS
792 	struct taskstats *stats;
793 #endif
794 #ifdef CONFIG_AUDIT
795 	unsigned audit_tty;
796 	struct tty_audit_buf *tty_audit_buf;
797 #endif
798 
799 	/*
800 	 * Thread is the potential origin of an oom condition; kill first on
801 	 * oom
802 	 */
803 	bool oom_flag_origin;
804 	short oom_score_adj;		/* OOM kill score adjustment */
805 	short oom_score_adj_min;	/* OOM kill score adjustment min value.
806 					 * Only settable by CAP_SYS_RESOURCE. */
807 
808 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
809 					 * credential calculations
810 					 * (notably ptrace) */
811 };
812 
813 /*
814  * Bits in flags field of signal_struct.
815  */
816 #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
817 #define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
818 #define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
819 #define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
820 /*
821  * Pending notifications to parent.
822  */
823 #define SIGNAL_CLD_STOPPED	0x00000010
824 #define SIGNAL_CLD_CONTINUED	0x00000020
825 #define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
826 
827 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
828 
829 /* If true, all threads except ->group_exit_task have pending SIGKILL */
830 static inline int signal_group_exit(const struct signal_struct *sig)
831 {
832 	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
833 		(sig->group_exit_task != NULL);
834 }
835 
836 /*
837  * Some day this will be a full-fledged user tracking system...
838  */
839 struct user_struct {
840 	atomic_t __count;	/* reference count */
841 	atomic_t processes;	/* How many processes does this user have? */
842 	atomic_t sigpending;	/* How many pending signals does this user have? */
843 #ifdef CONFIG_INOTIFY_USER
844 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
845 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
846 #endif
847 #ifdef CONFIG_FANOTIFY
848 	atomic_t fanotify_listeners;
849 #endif
850 #ifdef CONFIG_EPOLL
851 	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
852 #endif
853 #ifdef CONFIG_POSIX_MQUEUE
854 	/* protected by mq_lock	*/
855 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
856 #endif
857 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
858 	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
859 	atomic_long_t pipe_bufs;  /* how many pages are allocated in pipe buffers */
860 
861 #ifdef CONFIG_KEYS
862 	struct key *uid_keyring;	/* UID specific keyring */
863 	struct key *session_keyring;	/* UID's default session keyring */
864 #endif
865 
866 	/* Hash table maintenance information */
867 	struct hlist_node uidhash_node;
868 	kuid_t uid;
869 
870 #if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
871 	atomic_long_t locked_vm;
872 #endif
873 };
874 
875 extern int uids_sysfs_init(void);
876 
877 extern struct user_struct *find_user(kuid_t);
878 
879 extern struct user_struct root_user;
880 #define INIT_USER (&root_user)
881 
882 
883 struct backing_dev_info;
884 struct reclaim_state;
885 
886 #ifdef CONFIG_SCHED_INFO
887 struct sched_info {
888 	/* cumulative counters */
889 	unsigned long pcount;	      /* # of times run on this cpu */
890 	unsigned long long run_delay; /* time spent waiting on a runqueue */
891 
892 	/* timestamps */
893 	unsigned long long last_arrival,/* when we last ran on a cpu */
894 			   last_queued;	/* when we were last queued to run */
895 };
896 #endif /* CONFIG_SCHED_INFO */
897 
898 #ifdef CONFIG_TASK_DELAY_ACCT
899 struct task_delay_info {
900 	spinlock_t	lock;
901 	unsigned int	flags;	/* Private per-task flags */
902 
903 	/* For each stat XXX, add the following, aligned appropriately
904 	 *
905 	 * struct timespec XXX_start, XXX_end;
906 	 * u64 XXX_delay;
907 	 * u32 XXX_count;
908 	 *
909 	 * Atomicity of updates to XXX_delay, XXX_count protected by
910 	 * single lock above (split into XXX_lock if contention is an issue).
911 	 */
912 
913 	/*
914 	 * XXX_count is incremented on every XXX operation, the delay
915 	 * associated with the operation is added to XXX_delay.
916 	 * XXX_delay contains the accumulated delay time in nanoseconds.
917 	 */
918 	u64 blkio_start;	/* Shared by blkio, swapin */
919 	u64 blkio_delay;	/* wait for sync block io completion */
920 	u64 swapin_delay;	/* wait for swapin block io completion */
921 	u32 blkio_count;	/* total count of the number of sync block */
922 				/* io operations performed */
923 	u32 swapin_count;	/* total count of the number of swapin block */
924 				/* io operations performed */
925 
926 	u64 freepages_start;
927 	u64 freepages_delay;	/* wait for memory reclaim */
928 	u32 freepages_count;	/* total count of memory reclaim */
929 };
930 #endif	/* CONFIG_TASK_DELAY_ACCT */
931 
932 static inline int sched_info_on(void)
933 {
934 #ifdef CONFIG_SCHEDSTATS
935 	return 1;
936 #elif defined(CONFIG_TASK_DELAY_ACCT)
937 	extern int delayacct_on;
938 	return delayacct_on;
939 #else
940 	return 0;
941 #endif
942 }
943 
944 #ifdef CONFIG_SCHEDSTATS
945 void force_schedstat_enabled(void);
946 #endif
947 
948 enum cpu_idle_type {
949 	CPU_IDLE,
950 	CPU_NOT_IDLE,
951 	CPU_NEWLY_IDLE,
952 	CPU_MAX_IDLE_TYPES
953 };
954 
955 /*
956  * Integer metrics need fixed point arithmetic, e.g., sched/fair
957  * has a few: load, load_avg, util_avg, freq, and capacity.
958  *
959  * We define a basic fixed point arithmetic range, and then formalize
960  * all these metrics based on that basic range.
961  */
962 # define SCHED_FIXEDPOINT_SHIFT	10
963 # define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)
964 
965 /*
966  * Increase resolution of cpu_capacity calculations
967  */
968 #define SCHED_CAPACITY_SHIFT	SCHED_FIXEDPOINT_SHIFT
969 #define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)
970 
971 /*
972  * Wake-queues are lists of tasks with a pending wakeup, whose
973  * callers have already marked the task as woken internally,
974  * and can thus carry on. A common use case is being able to
975  * do the wakeups once the corresponding user lock has been
976  * released.
977  *
978  * We hold a reference to each task in the list across the wakeup,
979  * thus guaranteeing that the memory is still valid by the time
980  * the actual wakeups are performed in wake_up_q().
981  *
982  * One per task suffices, because there's never a need for a task to be
983  * in two wake queues simultaneously; it is forbidden to abandon a task
984  * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
985  * already in a wake queue, the wakeup will happen soon and the second
986  * waker can just skip it.
987  *
988  * The WAKE_Q macro declares and initializes the list head.
989  * wake_up_q() does NOT reinitialize the list; it's expected to be
990  * called near the end of a function, where the fact that the queue is
991  * not used again will be easy to see by inspection.
992  *
993  * Note that this can cause spurious wakeups. schedule() callers
994  * must ensure the call is done inside a loop, confirming that the
995  * wakeup condition has in fact occurred.
996  */
997 struct wake_q_node {
998 	struct wake_q_node *next;
999 };
1000 
1001 struct wake_q_head {
1002 	struct wake_q_node *first;
1003 	struct wake_q_node **lastp;
1004 };
1005 
1006 #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
1007 
1008 #define WAKE_Q(name)					\
1009 	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
1010 
1011 extern void wake_q_add(struct wake_q_head *head,
1012 		       struct task_struct *task);
1013 extern void wake_up_q(struct wake_q_head *head);
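
/*
 * Typical (sketched) usage of the wake-queue helpers declared above:
 *
 *	WAKE_Q(wake_q);
 *
 *	spin_lock(&some_lock);
 *	wake_q_add(&wake_q, task);
 *	spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);
 *
 * i.e. tasks are collected while the lock is held and the actual wakeups
 * are issued only after it has been dropped, matching the description
 * above.
 */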
1014 
1015 /*
1016  * sched-domains (multiprocessor balancing) declarations:
1017  */
1018 #ifdef CONFIG_SMP
1019 #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
1020 #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
1021 #define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
1022 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
1023 #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
1024 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
1025 #define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu power */
1026 #define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
1027 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
1028 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
1029 #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
1030 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
1031 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
1032 #define SD_NUMA			0x4000	/* cross-node balancing */
1033 
1034 #ifdef CONFIG_SCHED_SMT
1035 static inline int cpu_smt_flags(void)
1036 {
1037 	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
1038 }
1039 #endif
1040 
1041 #ifdef CONFIG_SCHED_MC
1042 static inline int cpu_core_flags(void)
1043 {
1044 	return SD_SHARE_PKG_RESOURCES;
1045 }
1046 #endif
1047 
1048 #ifdef CONFIG_NUMA
1049 static inline int cpu_numa_flags(void)
1050 {
1051 	return SD_NUMA;
1052 }
1053 #endif
1054 
1055 struct sched_domain_attr {
1056 	int relax_domain_level;
1057 };
1058 
1059 #define SD_ATTR_INIT	(struct sched_domain_attr) {	\
1060 	.relax_domain_level = -1,			\
1061 }
1062 
1063 extern int sched_domain_level_max;
1064 
1065 struct sched_group;
1066 
1067 struct sched_domain {
1068 	/* These fields must be setup */
1069 	struct sched_domain *parent;	/* top domain must be null terminated */
1070 	struct sched_domain *child;	/* bottom domain must be null terminated */
1071 	struct sched_group *groups;	/* the balancing groups of the domain */
1072 	unsigned long min_interval;	/* Minimum balance interval ms */
1073 	unsigned long max_interval;	/* Maximum balance interval ms */
1074 	unsigned int busy_factor;	/* less balancing by factor if busy */
1075 	unsigned int imbalance_pct;	/* No balance until over watermark */
1076 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
1077 	unsigned int busy_idx;
1078 	unsigned int idle_idx;
1079 	unsigned int newidle_idx;
1080 	unsigned int wake_idx;
1081 	unsigned int forkexec_idx;
1082 	unsigned int smt_gain;
1083 
1084 	int nohz_idle;			/* NOHZ IDLE status */
1085 	int flags;			/* See SD_* */
1086 	int level;
1087 
1088 	/* Runtime fields. */
1089 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
1090 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
1091 	unsigned int nr_balance_failed; /* initialise to 0 */
1092 
1093 	/* idle_balance() stats */
1094 	u64 max_newidle_lb_cost;
1095 	unsigned long next_decay_max_lb_cost;
1096 
1097 #ifdef CONFIG_SCHEDSTATS
1098 	/* load_balance() stats */
1099 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
1100 	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
1101 	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
1102 	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
1103 	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
1104 	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
1105 	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
1106 	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
1107 
1108 	/* Active load balancing */
1109 	unsigned int alb_count;
1110 	unsigned int alb_failed;
1111 	unsigned int alb_pushed;
1112 
1113 	/* SD_BALANCE_EXEC stats */
1114 	unsigned int sbe_count;
1115 	unsigned int sbe_balanced;
1116 	unsigned int sbe_pushed;
1117 
1118 	/* SD_BALANCE_FORK stats */
1119 	unsigned int sbf_count;
1120 	unsigned int sbf_balanced;
1121 	unsigned int sbf_pushed;
1122 
1123 	/* try_to_wake_up() stats */
1124 	unsigned int ttwu_wake_remote;
1125 	unsigned int ttwu_move_affine;
1126 	unsigned int ttwu_move_balance;
1127 #endif
1128 #ifdef CONFIG_SCHED_DEBUG
1129 	char *name;
1130 #endif
1131 	union {
1132 		void *private;		/* used during construction */
1133 		struct rcu_head rcu;	/* used during destruction */
1134 	};
1135 
1136 	unsigned int span_weight;
1137 	/*
1138 	 * Span of all CPUs in this domain.
1139 	 *
1140 	 * NOTE: this field is variable length. (Allocated dynamically
1141 	 * by attaching extra space to the end of the structure,
1142 	 * depending on how many CPUs the kernel has booted up with)
1143 	 */
1144 	unsigned long span[0];
1145 };
1146 
1147 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1148 {
1149 	return to_cpumask(sd->span);
1150 }
1151 
1152 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1153 				    struct sched_domain_attr *dattr_new);
1154 
1155 /* Allocate an array of sched domains, for partition_sched_domains(). */
1156 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1157 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1158 
1159 bool cpus_share_cache(int this_cpu, int that_cpu);
1160 
1161 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1162 typedef int (*sched_domain_flags_f)(void);
1163 
1164 #define SDTL_OVERLAP	0x01
1165 
1166 struct sd_data {
1167 	struct sched_domain **__percpu sd;
1168 	struct sched_group **__percpu sg;
1169 	struct sched_group_capacity **__percpu sgc;
1170 };
1171 
1172 struct sched_domain_topology_level {
1173 	sched_domain_mask_f mask;
1174 	sched_domain_flags_f sd_flags;
1175 	int		    flags;
1176 	int		    numa_level;
1177 	struct sd_data      data;
1178 #ifdef CONFIG_SCHED_DEBUG
1179 	char                *name;
1180 #endif
1181 };
1182 
1183 extern void set_sched_topology(struct sched_domain_topology_level *tl);
1184 extern void wake_up_if_idle(int cpu);
1185 
1186 #ifdef CONFIG_SCHED_DEBUG
1187 # define SD_INIT_NAME(type)		.name = #type
1188 #else
1189 # define SD_INIT_NAME(type)
1190 #endif
1191 
1192 #else /* CONFIG_SMP */
1193 
1194 struct sched_domain_attr;
1195 
1196 static inline void
1197 partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1198 			struct sched_domain_attr *dattr_new)
1199 {
1200 }
1201 
1202 static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1203 {
1204 	return true;
1205 }
1206 
1207 #endif	/* !CONFIG_SMP */
1208 
1209 
1210 struct io_context;			/* See blkdev.h */
1211 
1212 
1213 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1214 extern void prefetch_stack(struct task_struct *t);
1215 #else
1216 static inline void prefetch_stack(struct task_struct *t) { }
1217 #endif
1218 
1219 struct audit_context;		/* See audit.c */
1220 struct mempolicy;
1221 struct pipe_inode_info;
1222 struct uts_namespace;
1223 
1224 struct load_weight {
1225 	unsigned long weight;
1226 	u32 inv_weight;
1227 };
1228 
1229 /*
1230  * The load_avg/util_avg accumulates an infinite geometric series
1231  * (see __update_load_avg() in kernel/sched/fair.c).
1232  *
1233  * [load_avg definition]
1234  *
1235  *   load_avg = runnable% * scale_load_down(load)
1236  *
1237  * where runnable% is the time ratio that a sched_entity is runnable.
1238  * For cfs_rq, it is the aggregated load_avg of all runnable and
1239  * blocked sched_entities.
1240  *
1241  * load_avg may also take frequency scaling into account:
1242  *
1243  *   load_avg = runnable% * scale_load_down(load) * freq%
1244  *
1245  * where freq% is the CPU frequency normalized to the highest frequency.
1246  *
1247  * [util_avg definition]
1248  *
1249  *   util_avg = running% * SCHED_CAPACITY_SCALE
1250  *
1251  * where running% is the time ratio that a sched_entity is running on
1252  * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
1253  * and blocked sched_entities.
1254  *
1255  * util_avg may also factor frequency scaling and CPU capacity scaling:
1256  *
1257  *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
1258  *
1259  * where freq% is the same as above, and capacity% is the CPU capacity
1260  * normalized to the greatest capacity (due to uarch differences, etc).
1261  *
1262  * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
1263  * themselves are in the range of [0, 1]. To do fixed point arithmetic,
1264  * we therefore scale them to as large a range as necessary. This is for
1265  * example reflected by util_avg's SCHED_CAPACITY_SCALE.
1266  *
1267  * [Overflow issue]
1268  *
1269  * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
1270  * with the highest load (=88761), always runnable on a single cfs_rq,
1271  * and should not overflow as the number already hits PID_MAX_LIMIT.
1272  *
1273  * For all other cases (including 32-bit kernels), struct load_weight's
1274  * weight will overflow first before we do, because:
1275  *
1276  *    Max(load_avg) <= Max(load.weight)
1277  *
1278  * Then it is the load_weight's responsibility to consider overflow
1279  * issues.
1280  */
1281 struct sched_avg {
1282 	u64 last_update_time, load_sum;
1283 	u32 util_sum, period_contrib;
1284 	unsigned long load_avg, util_avg;
1285 };
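
/*
 * Rough numeric illustration of the ratios documented above: a nice-0
 * sched_entity (weight scaled down to 1024) that is runnable half of the
 * time settles around load_avg = 512, and an entity running 25% of the
 * time on a full-capacity, full-frequency CPU settles around
 * util_avg = 256 (25% of SCHED_CAPACITY_SCALE == 1024).
 */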
1286 
1287 #ifdef CONFIG_SCHEDSTATS
1288 struct sched_statistics {
1289 	u64			wait_start;
1290 	u64			wait_max;
1291 	u64			wait_count;
1292 	u64			wait_sum;
1293 	u64			iowait_count;
1294 	u64			iowait_sum;
1295 
1296 	u64			sleep_start;
1297 	u64			sleep_max;
1298 	s64			sum_sleep_runtime;
1299 
1300 	u64			block_start;
1301 	u64			block_max;
1302 	u64			exec_max;
1303 	u64			slice_max;
1304 
1305 	u64			nr_migrations_cold;
1306 	u64			nr_failed_migrations_affine;
1307 	u64			nr_failed_migrations_running;
1308 	u64			nr_failed_migrations_hot;
1309 	u64			nr_forced_migrations;
1310 
1311 	u64			nr_wakeups;
1312 	u64			nr_wakeups_sync;
1313 	u64			nr_wakeups_migrate;
1314 	u64			nr_wakeups_local;
1315 	u64			nr_wakeups_remote;
1316 	u64			nr_wakeups_affine;
1317 	u64			nr_wakeups_affine_attempts;
1318 	u64			nr_wakeups_passive;
1319 	u64			nr_wakeups_idle;
1320 };
1321 #endif
1322 
1323 struct sched_entity {
1324 	struct load_weight	load;		/* for load-balancing */
1325 	struct rb_node		run_node;
1326 	struct list_head	group_node;
1327 	unsigned int		on_rq;
1328 
1329 	u64			exec_start;
1330 	u64			sum_exec_runtime;
1331 	u64			vruntime;
1332 	u64			prev_sum_exec_runtime;
1333 
1334 	u64			nr_migrations;
1335 
1336 #ifdef CONFIG_SCHEDSTATS
1337 	struct sched_statistics statistics;
1338 #endif
1339 
1340 #ifdef CONFIG_FAIR_GROUP_SCHED
1341 	int			depth;
1342 	struct sched_entity	*parent;
1343 	/* rq on which this entity is (to be) queued: */
1344 	struct cfs_rq		*cfs_rq;
1345 	/* rq "owned" by this entity/group: */
1346 	struct cfs_rq		*my_q;
1347 #endif
1348 
1349 #ifdef CONFIG_SMP
1350 	/*
1351 	 * Per entity load average tracking.
1352 	 *
1353 	 * Put into separate cache line so it does not
1354 	 * collide with read-mostly values above.
1355 	 */
1356 	struct sched_avg	avg ____cacheline_aligned_in_smp;
1357 #endif
1358 };
1359 
1360 struct sched_rt_entity {
1361 	struct list_head run_list;
1362 	unsigned long timeout;
1363 	unsigned long watchdog_stamp;
1364 	unsigned int time_slice;
1365 	unsigned short on_rq;
1366 	unsigned short on_list;
1367 
1368 	struct sched_rt_entity *back;
1369 #ifdef CONFIG_RT_GROUP_SCHED
1370 	struct sched_rt_entity	*parent;
1371 	/* rq on which this entity is (to be) queued: */
1372 	struct rt_rq		*rt_rq;
1373 	/* rq "owned" by this entity/group: */
1374 	struct rt_rq		*my_q;
1375 #endif
1376 };
1377 
1378 struct sched_dl_entity {
1379 	struct rb_node	rb_node;
1380 
1381 	/*
1382 	 * Original scheduling parameters. Copied here from sched_attr
1383 	 * during sched_setattr(), they will remain the same until
1384 	 * the next sched_setattr().
1385 	 */
1386 	u64 dl_runtime;		/* maximum runtime for each instance	*/
1387 	u64 dl_deadline;	/* relative deadline of each instance	*/
1388 	u64 dl_period;		/* separation of two instances (period) */
1389 	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
1390 
1391 	/*
1392 	 * Actual scheduling parameters. Initialized with the values above,
1393 	 * they are continuously updated during task execution. Note that
1394 	 * the remaining runtime could be < 0 in case we are in overrun.
1395 	 */
1396 	s64 runtime;		/* remaining runtime for this instance	*/
1397 	u64 deadline;		/* absolute deadline for this instance	*/
1398 	unsigned int flags;	/* specifying the scheduler behaviour	*/
1399 
1400 	/*
1401 	 * Some bool flags:
1402 	 *
1403 	 * @dl_throttled tells if we exhausted the runtime. If so, the
1404 	 * task has to wait for a replenishment to be performed at the
1405 	 * next firing of dl_timer.
1406 	 *
1407 	 * @dl_boosted tells if we are boosted due to PI. If so we are
1408 	 * outside bandwidth enforcement mechanism (but only until we
1409 	 * exit the critical section);
1410 	 *
1411 	 * @dl_yielded tells if task gave up the cpu before consuming
1412 	 * all its available runtime during the last job.
1413 	 */
1414 	int dl_throttled, dl_boosted, dl_yielded;
1415 
1416 	/*
1417 	 * Bandwidth enforcement timer. Each -deadline task has its
1418 	 * own bandwidth to be enforced, thus we need one timer per task.
1419 	 */
1420 	struct hrtimer dl_timer;
1421 };
1422 
1423 union rcu_special {
1424 	struct {
1425 		u8 blocked;
1426 		u8 need_qs;
1427 		u8 exp_need_qs;
1428 		u8 pad;	/* Otherwise the compiler can store garbage here. */
1429 	} b; /* Bits. */
1430 	u32 s; /* Set of bits. */
1431 };
1432 struct rcu_node;
1433 
1434 enum perf_event_task_context {
1435 	perf_invalid_context = -1,
1436 	perf_hw_context = 0,
1437 	perf_sw_context,
1438 	perf_nr_task_contexts,
1439 };
1440 
1441 /* Track pages that require TLB flushes */
1442 struct tlbflush_unmap_batch {
1443 	/*
1444 	 * Each bit set is a CPU that potentially has a TLB entry for one of
1445 	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
1446 	 */
1447 	struct cpumask cpumask;
1448 
1449 	/* True if any bit in cpumask is set */
1450 	bool flush_required;
1451 
1452 	/*
1453 	 * If true then the PTE was dirty when unmapped. The entry must be
1454 	 * flushed before IO is initiated or a stale TLB entry potentially
1455 	 * allows an update without redirtying the page.
1456 	 */
1457 	bool writable;
1458 };
1459 
1460 struct task_struct {
1461 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1462 	void *stack;
1463 	atomic_t usage;
1464 	unsigned int flags;	/* per process flags, defined below */
1465 	unsigned int ptrace;
1466 
1467 #ifdef CONFIG_SMP
1468 	struct llist_node wake_entry;
1469 	int on_cpu;
1470 	unsigned int wakee_flips;
1471 	unsigned long wakee_flip_decay_ts;
1472 	struct task_struct *last_wakee;
1473 
1474 	int wake_cpu;
1475 #endif
1476 	int on_rq;
1477 
1478 	int prio, static_prio, normal_prio;
1479 	unsigned int rt_priority;
1480 	const struct sched_class *sched_class;
1481 	struct sched_entity se;
1482 	struct sched_rt_entity rt;
1483 #ifdef CONFIG_CGROUP_SCHED
1484 	struct task_group *sched_task_group;
1485 #endif
1486 	struct sched_dl_entity dl;
1487 
1488 #ifdef CONFIG_PREEMPT_NOTIFIERS
1489 	/* list of struct preempt_notifier: */
1490 	struct hlist_head preempt_notifiers;
1491 #endif
1492 
1493 #ifdef CONFIG_BLK_DEV_IO_TRACE
1494 	unsigned int btrace_seq;
1495 #endif
1496 
1497 	unsigned int policy;
1498 	int nr_cpus_allowed;
1499 	cpumask_t cpus_allowed;
1500 
1501 #ifdef CONFIG_PREEMPT_RCU
1502 	int rcu_read_lock_nesting;
1503 	union rcu_special rcu_read_unlock_special;
1504 	struct list_head rcu_node_entry;
1505 	struct rcu_node *rcu_blocked_node;
1506 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1507 #ifdef CONFIG_TASKS_RCU
1508 	unsigned long rcu_tasks_nvcsw;
1509 	bool rcu_tasks_holdout;
1510 	struct list_head rcu_tasks_holdout_list;
1511 	int rcu_tasks_idle_cpu;
1512 #endif /* #ifdef CONFIG_TASKS_RCU */
1513 
1514 #ifdef CONFIG_SCHED_INFO
1515 	struct sched_info sched_info;
1516 #endif
1517 
1518 	struct list_head tasks;
1519 #ifdef CONFIG_SMP
1520 	struct plist_node pushable_tasks;
1521 	struct rb_node pushable_dl_tasks;
1522 #endif
1523 
1524 	struct mm_struct *mm, *active_mm;
1525 	/* per-thread vma caching */
1526 	u32 vmacache_seqnum;
1527 	struct vm_area_struct *vmacache[VMACACHE_SIZE];
1528 #if defined(SPLIT_RSS_COUNTING)
1529 	struct task_rss_stat	rss_stat;
1530 #endif
1531 /* task state */
1532 	int exit_state;
1533 	int exit_code, exit_signal;
1534 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1535 	unsigned long jobctl;	/* JOBCTL_*, siglock protected */
1536 
1537 	/* Used for emulating ABI behavior of previous Linux versions */
1538 	unsigned int personality;
1539 
1540 	/* scheduler bits, serialized by scheduler locks */
1541 	unsigned sched_reset_on_fork:1;
1542 	unsigned sched_contributes_to_load:1;
1543 	unsigned sched_migrated:1;
1544 	unsigned sched_remote_wakeup:1;
1545 	unsigned :0; /* force alignment to the next boundary */
1546 
1547 	/* unserialized, strictly 'current' */
1548 	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
1549 	unsigned in_iowait:1;
1550 #if !defined(TIF_RESTORE_SIGMASK)
1551 	unsigned restore_sigmask:1;
1552 #endif
1553 #ifdef CONFIG_MEMCG
1554 	unsigned memcg_may_oom:1;
1555 #ifndef CONFIG_SLOB
1556 	unsigned memcg_kmem_skip_account:1;
1557 #endif
1558 #endif
1559 #ifdef CONFIG_COMPAT_BRK
1560 	unsigned brk_randomized:1;
1561 #endif
1562 
1563 	unsigned long atomic_flags; /* Flags needing atomic access. */
1564 
1565 	struct restart_block restart_block;
1566 
1567 	pid_t pid;
1568 	pid_t tgid;
1569 
1570 #ifdef CONFIG_CC_STACKPROTECTOR
1571 	/* Canary value for the -fstack-protector gcc feature */
1572 	unsigned long stack_canary;
1573 #endif
1574 	/*
1575 	 * pointers to (original) parent process, youngest child, younger sibling,
1576 	 * older sibling, respectively.  (p->father can be replaced with
1577 	 * p->real_parent->pid)
1578 	 */
1579 	struct task_struct __rcu *real_parent; /* real parent process */
1580 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1581 	/*
1582 	 * children/sibling forms the list of my natural children
1583 	 */
1584 	struct list_head children;	/* list of my children */
1585 	struct list_head sibling;	/* linkage in my parent's children list */
1586 	struct task_struct *group_leader;	/* threadgroup leader */
1587 
1588 	/*
1589 	 * ptraced is the list of tasks this task is using ptrace on.
1590 	 * This includes both natural children and PTRACE_ATTACH targets.
1591 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1592 	 */
1593 	struct list_head ptraced;
1594 	struct list_head ptrace_entry;
1595 
1596 	/* PID/PID hash table linkage. */
1597 	struct pid_link pids[PIDTYPE_MAX];
1598 	struct list_head thread_group;
1599 	struct list_head thread_node;
1600 
1601 	struct completion *vfork_done;		/* for vfork() */
1602 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
1603 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
1604 
1605 	cputime_t utime, stime, utimescaled, stimescaled;
1606 	cputime_t gtime;
1607 	struct prev_cputime prev_cputime;
1608 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1609 	seqcount_t vtime_seqcount;
1610 	unsigned long long vtime_snap;
1611 	enum {
1612 		/* Task is sleeping or running in a CPU with VTIME inactive */
1613 		VTIME_INACTIVE = 0,
1614 		/* Task runs in userspace in a CPU with VTIME active */
1615 		VTIME_USER,
1616 		/* Task runs in kernelspace in a CPU with VTIME active */
1617 		VTIME_SYS,
1618 	} vtime_snap_whence;
1619 #endif
1620 
1621 #ifdef CONFIG_NO_HZ_FULL
1622 	atomic_t tick_dep_mask;
1623 #endif
1624 	unsigned long nvcsw, nivcsw; /* context switch counts */
1625 	u64 start_time;		/* monotonic time in nsec */
1626 	u64 real_start_time;	/* boot based time in nsec */
1627 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1628 	unsigned long min_flt, maj_flt;
1629 
1630 	struct task_cputime cputime_expires;
1631 	struct list_head cpu_timers[3];
1632 
1633 /* process credentials */
1634 	const struct cred __rcu *real_cred; /* objective and real subjective task
1635 					 * credentials (COW) */
1636 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
1637 					 * credentials (COW) */
1638 	char comm[TASK_COMM_LEN]; /* executable name excluding path
1639 				     - access with [gs]et_task_comm (which lock
1640 				       it with task_lock())
1641 				     - initialized normally by setup_new_exec */
1642 /* file system info */
1643 	struct nameidata *nameidata;
1644 #ifdef CONFIG_SYSVIPC
1645 /* ipc stuff */
1646 	struct sysv_sem sysvsem;
1647 	struct sysv_shm sysvshm;
1648 #endif
1649 #ifdef CONFIG_DETECT_HUNG_TASK
1650 /* hung task detection */
1651 	unsigned long last_switch_count;
1652 #endif
1653 /* filesystem information */
1654 	struct fs_struct *fs;
1655 /* open file information */
1656 	struct files_struct *files;
1657 /* namespaces */
1658 	struct nsproxy *nsproxy;
1659 /* signal handlers */
1660 	struct signal_struct *signal;
1661 	struct sighand_struct *sighand;
1662 
1663 	sigset_t blocked, real_blocked;
1664 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
1665 	struct sigpending pending;
1666 
1667 	unsigned long sas_ss_sp;
1668 	size_t sas_ss_size;
1669 	unsigned sas_ss_flags;
1670 
1671 	struct callback_head *task_works;
1672 
1673 	struct audit_context *audit_context;
1674 #ifdef CONFIG_AUDITSYSCALL
1675 	kuid_t loginuid;
1676 	unsigned int sessionid;
1677 #endif
1678 	struct seccomp seccomp;
1679 
1680 /* Thread group tracking */
1681    	u32 parent_exec_id;
1682    	u32 self_exec_id;
1683 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1684  * mempolicy */
1685 	spinlock_t alloc_lock;
1686 
1687 	/* Protection of the PI data structures: */
1688 	raw_spinlock_t pi_lock;
1689 
1690 	struct wake_q_node wake_q;
1691 
1692 #ifdef CONFIG_RT_MUTEXES
1693 	/* PI waiters blocked on a rt_mutex held by this task */
1694 	struct rb_root pi_waiters;
1695 	struct rb_node *pi_waiters_leftmost;
1696 	/* Deadlock detection and priority inheritance handling */
1697 	struct rt_mutex_waiter *pi_blocked_on;
1698 #endif
1699 
1700 #ifdef CONFIG_DEBUG_MUTEXES
1701 	/* mutex deadlock detection */
1702 	struct mutex_waiter *blocked_on;
1703 #endif
1704 #ifdef CONFIG_TRACE_IRQFLAGS
1705 	unsigned int irq_events;
1706 	unsigned long hardirq_enable_ip;
1707 	unsigned long hardirq_disable_ip;
1708 	unsigned int hardirq_enable_event;
1709 	unsigned int hardirq_disable_event;
1710 	int hardirqs_enabled;
1711 	int hardirq_context;
1712 	unsigned long softirq_disable_ip;
1713 	unsigned long softirq_enable_ip;
1714 	unsigned int softirq_disable_event;
1715 	unsigned int softirq_enable_event;
1716 	int softirqs_enabled;
1717 	int softirq_context;
1718 #endif
1719 #ifdef CONFIG_LOCKDEP
1720 # define MAX_LOCK_DEPTH 48UL
1721 	u64 curr_chain_key;
1722 	int lockdep_depth;
1723 	unsigned int lockdep_recursion;
1724 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1725 	gfp_t lockdep_reclaim_gfp;
1726 #endif
1727 #ifdef CONFIG_UBSAN
1728 	unsigned int in_ubsan;
1729 #endif
1730 
1731 /* journalling filesystem info */
1732 	void *journal_info;
1733 
1734 /* stacked block device info */
1735 	struct bio_list *bio_list;
1736 
1737 #ifdef CONFIG_BLOCK
1738 /* stack plugging */
1739 	struct blk_plug *plug;
1740 #endif
1741 
1742 /* VM state */
1743 	struct reclaim_state *reclaim_state;
1744 
1745 	struct backing_dev_info *backing_dev_info;
1746 
1747 	struct io_context *io_context;
1748 
1749 	unsigned long ptrace_message;
1750 	siginfo_t *last_siginfo; /* For ptrace use.  */
1751 	struct task_io_accounting ioac;
1752 #if defined(CONFIG_TASK_XACCT)
1753 	u64 acct_rss_mem1;	/* accumulated rss usage */
1754 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1755 	cputime_t acct_timexpd;	/* stime + utime since last update */
1756 #endif
1757 #ifdef CONFIG_CPUSETS
1758 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1759 	seqcount_t mems_allowed_seq;	/* Sequence number to catch updates */
1760 	int cpuset_mem_spread_rotor;
1761 	int cpuset_slab_spread_rotor;
1762 #endif
1763 #ifdef CONFIG_CGROUPS
1764 	/* Control Group info protected by css_set_lock */
1765 	struct css_set __rcu *cgroups;
1766 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1767 	struct list_head cg_list;
1768 #endif
1769 #ifdef CONFIG_FUTEX
1770 	struct robust_list_head __user *robust_list;
1771 #ifdef CONFIG_COMPAT
1772 	struct compat_robust_list_head __user *compat_robust_list;
1773 #endif
1774 	struct list_head pi_state_list;
1775 	struct futex_pi_state *pi_state_cache;
1776 #endif
1777 #ifdef CONFIG_PERF_EVENTS
1778 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1779 	struct mutex perf_event_mutex;
1780 	struct list_head perf_event_list;
1781 #endif
1782 #ifdef CONFIG_DEBUG_PREEMPT
1783 	unsigned long preempt_disable_ip;
1784 #endif
1785 #ifdef CONFIG_NUMA
1786 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1787 	short il_next;
1788 	short pref_node_fork;
1789 #endif
1790 #ifdef CONFIG_NUMA_BALANCING
1791 	int numa_scan_seq;
1792 	unsigned int numa_scan_period;
1793 	unsigned int numa_scan_period_max;
1794 	int numa_preferred_nid;
1795 	unsigned long numa_migrate_retry;
1796 	u64 node_stamp;			/* migration stamp  */
1797 	u64 last_task_numa_placement;
1798 	u64 last_sum_exec_runtime;
1799 	struct callback_head numa_work;
1800 
1801 	struct list_head numa_entry;
1802 	struct numa_group *numa_group;
1803 
1804 	/*
1805 	 * numa_faults is an array split into four regions:
1806 	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1807 	 * in this precise order.
1808 	 *
1809 	 * faults_memory: Exponential decaying average of faults on a per-node
1810 	 * basis. Scheduling placement decisions are made based on these
1811 	 * counts. The values remain static for the duration of a PTE scan.
1812 	 * faults_cpu: Track the nodes the process was running on when a NUMA
1813 	 * hinting fault was incurred.
1814 	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1815 	 * during the current scan window. When the scan completes, the counts
1816 	 * in faults_memory and faults_cpu decay and these values are copied.
1817 	 */
1818 	unsigned long *numa_faults;
1819 	unsigned long total_numa_faults;
1820 
1821 	/*
1822 	 * numa_faults_locality tracks if faults recorded during the last
1823 	 * scan window were remote/local or failed to migrate. The task scan
1824 	 * period is adapted based on the locality of the faults with different
1825 	 * weights depending on whether they were shared or private faults
1826 	 */
1827 	unsigned long numa_faults_locality[3];
1828 
1829 	unsigned long numa_pages_migrated;
1830 #endif /* CONFIG_NUMA_BALANCING */
1831 
1832 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1833 	struct tlbflush_unmap_batch tlb_ubc;
1834 #endif
1835 
1836 	struct rcu_head rcu;
1837 
1838 	/*
1839 	 * cache last used pipe for splice
1840 	 */
1841 	struct pipe_inode_info *splice_pipe;
1842 
1843 	struct page_frag task_frag;
1844 
1845 #ifdef	CONFIG_TASK_DELAY_ACCT
1846 	struct task_delay_info *delays;
1847 #endif
1848 #ifdef CONFIG_FAULT_INJECTION
1849 	int make_it_fail;
1850 #endif
1851 	/*
1852 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1853 	 * balance_dirty_pages() for some dirty throttling pause
1854 	 */
1855 	int nr_dirtied;
1856 	int nr_dirtied_pause;
1857 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
1858 
1859 #ifdef CONFIG_LATENCYTOP
1860 	int latency_record_count;
1861 	struct latency_record latency_record[LT_SAVECOUNT];
1862 #endif
1863 	/*
1864 	 * time slack values; these are used to round up poll() and
1865 	 * select() etc timeout values. These are in nanoseconds.
1866 	 */
1867 	u64 timer_slack_ns;
1868 	u64 default_timer_slack_ns;
1869 
1870 #ifdef CONFIG_KASAN
1871 	unsigned int kasan_depth;
1872 #endif
1873 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1874 	/* Index of current stored address in ret_stack */
1875 	int curr_ret_stack;
1876 	/* Stack of return addresses for return function tracing */
1877 	struct ftrace_ret_stack	*ret_stack;
1878 	/* time stamp for last schedule */
1879 	unsigned long long ftrace_timestamp;
1880 	/*
1881 	 * Number of functions that haven't been traced
1882 	 * because of depth overrun.
1883 	 */
1884 	atomic_t trace_overrun;
1885 	/* Pause for the tracing */
1886 	atomic_t tracing_graph_pause;
1887 #endif
1888 #ifdef CONFIG_TRACING
1889 	/* state flags for use by tracers */
1890 	unsigned long trace;
1891 	/* bitmask and counter of trace recursion */
1892 	unsigned long trace_recursion;
1893 #endif /* CONFIG_TRACING */
1894 #ifdef CONFIG_KCOV
1895 	/* Coverage collection mode enabled for this task (0 if disabled). */
1896 	enum kcov_mode kcov_mode;
1897 	/* Size of the kcov_area. */
1898 	unsigned	kcov_size;
1899 	/* Buffer for coverage collection. */
1900 	void		*kcov_area;
1901 	/* kcov descriptor wired with this task or NULL. */
1902 	struct kcov	*kcov;
1903 #endif
1904 #ifdef CONFIG_MEMCG
1905 	struct mem_cgroup *memcg_in_oom;
1906 	gfp_t memcg_oom_gfp_mask;
1907 	int memcg_oom_order;
1908 
1909 	/* number of pages to reclaim on returning to userland */
1910 	unsigned int memcg_nr_pages_over_high;
1911 #endif
1912 #ifdef CONFIG_UPROBES
1913 	struct uprobe_task *utask;
1914 #endif
1915 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1916 	unsigned int	sequential_io;
1917 	unsigned int	sequential_io_avg;
1918 #endif
1919 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1920 	unsigned long	task_state_change;
1921 #endif
1922 	int pagefault_disabled;
1923 #ifdef CONFIG_MMU
1924 	struct task_struct *oom_reaper_list;
1925 #endif
1926 /* CPU-specific state of this task */
1927 	struct thread_struct thread;
1928 /*
1929  * WARNING: on x86, 'thread_struct' contains a variable-sized
1930  * structure.  It *MUST* be at the end of 'task_struct'.
1931  *
1932  * Do not put anything below here!
1933  */
1934 };
1935 
1936 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1937 extern int arch_task_struct_size __read_mostly;
1938 #else
1939 # define arch_task_struct_size (sizeof(struct task_struct))
1940 #endif
1941 
1942 /* Future-safe accessor for struct task_struct's cpus_allowed. */
1943 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1944 
1945 static inline int tsk_nr_cpus_allowed(struct task_struct *p)
1946 {
1947 	return p->nr_cpus_allowed;
1948 }
1949 
1950 #define TNF_MIGRATED	0x01
1951 #define TNF_NO_GROUP	0x02
1952 #define TNF_SHARED	0x04
1953 #define TNF_FAULT_LOCAL	0x08
1954 #define TNF_MIGRATE_FAIL 0x10
1955 
1956 static inline bool in_vfork(struct task_struct *tsk)
1957 {
1958 	bool ret;
1959 
1960 	/*
1961 	 * need RCU to access ->real_parent if CLONE_VM was used along with
1962 	 * CLONE_PARENT.
1963 	 *
1964 	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
1965 	 * imply CLONE_VM
1966 	 *
1967 	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
1968 	 * ->real_parent is not necessarily the task doing vfork(), so in
1969 	 * theory we can't rely on task_lock() if we want to dereference it.
1970 	 *
1971 	 * And in this case we can't trust the real_parent->mm == tsk->mm
1972 	 * check, it can be false negative. But we do not care, if init or
1973 	 * another oom-unkillable task does this it should blame itself.
1974 	 */
1975 	rcu_read_lock();
1976 	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
1977 	rcu_read_unlock();
1978 
1979 	return ret;
1980 }
1981 
1982 #ifdef CONFIG_NUMA_BALANCING
1983 extern void task_numa_fault(int last_node, int node, int pages, int flags);
1984 extern pid_t task_numa_group_id(struct task_struct *p);
1985 extern void set_numabalancing_state(bool enabled);
1986 extern void task_numa_free(struct task_struct *p);
1987 extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1988 					int src_nid, int dst_cpu);
1989 #else
1990 static inline void task_numa_fault(int last_node, int node, int pages,
1991 				   int flags)
1992 {
1993 }
1994 static inline pid_t task_numa_group_id(struct task_struct *p)
1995 {
1996 	return 0;
1997 }
1998 static inline void set_numabalancing_state(bool enabled)
1999 {
2000 }
2001 static inline void task_numa_free(struct task_struct *p)
2002 {
2003 }
2004 static inline bool should_numa_migrate_memory(struct task_struct *p,
2005 				struct page *page, int src_nid, int dst_cpu)
2006 {
2007 	return true;
2008 }
2009 #endif
2010 
2011 static inline struct pid *task_pid(struct task_struct *task)
2012 {
2013 	return task->pids[PIDTYPE_PID].pid;
2014 }
2015 
2016 static inline struct pid *task_tgid(struct task_struct *task)
2017 {
2018 	return task->group_leader->pids[PIDTYPE_PID].pid;
2019 }
2020 
2021 /*
2022  * Without tasklist or rcu lock it is not safe to dereference
2023  * the result of task_pgrp/task_session even if task == current,
2024  * we can race with another thread doing sys_setsid/sys_setpgid.
2025  */
2026 static inline struct pid *task_pgrp(struct task_struct *task)
2027 {
2028 	return task->group_leader->pids[PIDTYPE_PGID].pid;
2029 }
2030 
2031 static inline struct pid *task_session(struct task_struct *task)
2032 {
2033 	return task->group_leader->pids[PIDTYPE_SID].pid;
2034 }
2035 
2036 struct pid_namespace;
2037 
2038 /*
2039  * the helpers to get the task's different pids as they are seen
2040  * from various namespaces
2041  *
2042  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
2043  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
2044  *                     current.
2045  * task_xid_nr_ns()  : id seen from the ns specified;
2046  *
2047  * set_task_vxid()   : assigns a virtual id to a task;
2048  *
2049  * see also pid_nr() etc in include/linux/pid.h
2050  */
2051 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
2052 			struct pid_namespace *ns);
2053 
2054 static inline pid_t task_pid_nr(struct task_struct *tsk)
2055 {
2056 	return tsk->pid;
2057 }
2058 
2059 static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
2060 					struct pid_namespace *ns)
2061 {
2062 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
2063 }
2064 
2065 static inline pid_t task_pid_vnr(struct task_struct *tsk)
2066 {
2067 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
2068 }
2069 
2070 
2071 static inline pid_t task_tgid_nr(struct task_struct *tsk)
2072 {
2073 	return tsk->tgid;
2074 }
2075 
2076 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
2077 
2078 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
2079 {
2080 	return pid_vnr(task_tgid(tsk));
2081 }
2082 
2083 
2084 static inline int pid_alive(const struct task_struct *p);
2085 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
2086 {
2087 	pid_t pid = 0;
2088 
2089 	rcu_read_lock();
2090 	if (pid_alive(tsk))
2091 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
2092 	rcu_read_unlock();
2093 
2094 	return pid;
2095 }
2096 
2097 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
2098 {
2099 	return task_ppid_nr_ns(tsk, &init_pid_ns);
2100 }
2101 
2102 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
2103 					struct pid_namespace *ns)
2104 {
2105 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
2106 }
2107 
2108 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
2109 {
2110 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
2111 }
2112 
2113 
2114 static inline pid_t task_session_nr_ns(struct task_struct *tsk,
2115 					struct pid_namespace *ns)
2116 {
2117 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
2118 }
2119 
2120 static inline pid_t task_session_vnr(struct task_struct *tsk)
2121 {
2122 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
2123 }
2124 
2125 /* obsolete, do not use */
2126 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
2127 {
2128 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
2129 }
2130 
2131 /**
2132  * pid_alive - check that a task structure is not stale
2133  * @p: Task structure to be checked.
2134  *
2135  * Test if a process is not yet dead (at most zombie state).
2136  * If pid_alive fails, then pointers within the task structure
2137  * can be stale and must not be dereferenced.
2138  *
2139  * Return: 1 if the process is alive. 0 otherwise.
2140  */
2141 static inline int pid_alive(const struct task_struct *p)
2142 {
2143 	return p->pids[PIDTYPE_PID].pid != NULL;
2144 }
2145 
2146 /**
2147  * is_global_init - check if a task structure is init. Since init
2148  * is free to have sub-threads we need to check tgid.
2149  * @tsk: Task structure to be checked.
2150  *
2151  * Check if a task structure is the first user space task the kernel created.
2152  *
2153  * Return: 1 if the task structure is init. 0 otherwise.
2154  */
2155 static inline int is_global_init(struct task_struct *tsk)
2156 {
2157 	return task_tgid_nr(tsk) == 1;
2158 }
2159 
2160 extern struct pid *cad_pid;
2161 
2162 extern void free_task(struct task_struct *tsk);
2163 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
2164 
2165 extern void __put_task_struct(struct task_struct *t);
2166 
2167 static inline void put_task_struct(struct task_struct *t)
2168 {
2169 	if (atomic_dec_and_test(&t->usage))
2170 		__put_task_struct(t);
2171 }
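/*
 * A minimal usage sketch (illustrative): the usual pattern for keeping a
 * task_struct alive across a section where it might otherwise be freed is
 * to pair get_task_struct() with put_task_struct():
 *
 *	get_task_struct(p);
 *	// ... p stays valid here even if the task exits ...
 *	put_task_struct(p);	// drops the reference; frees p if it was the last
 */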
2172 
2173 struct task_struct *task_rcu_dereference(struct task_struct **ptask);
2174 struct task_struct *try_get_task_struct(struct task_struct **ptask);
2175 
2176 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2177 extern void task_cputime(struct task_struct *t,
2178 			 cputime_t *utime, cputime_t *stime);
2179 extern void task_cputime_scaled(struct task_struct *t,
2180 				cputime_t *utimescaled, cputime_t *stimescaled);
2181 extern cputime_t task_gtime(struct task_struct *t);
2182 #else
2183 static inline void task_cputime(struct task_struct *t,
2184 				cputime_t *utime, cputime_t *stime)
2185 {
2186 	if (utime)
2187 		*utime = t->utime;
2188 	if (stime)
2189 		*stime = t->stime;
2190 }
2191 
2192 static inline void task_cputime_scaled(struct task_struct *t,
2193 				       cputime_t *utimescaled,
2194 				       cputime_t *stimescaled)
2195 {
2196 	if (utimescaled)
2197 		*utimescaled = t->utimescaled;
2198 	if (stimescaled)
2199 		*stimescaled = t->stimescaled;
2200 }
2201 
2202 static inline cputime_t task_gtime(struct task_struct *t)
2203 {
2204 	return t->gtime;
2205 }
2206 #endif
2207 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2208 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
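/*
 * A minimal usage sketch (illustrative): reading a task's accumulated
 * user/system time through the task_cputime() accessor above, which works
 * whether or not CONFIG_VIRT_CPU_ACCOUNTING_GEN is enabled:
 *
 *	cputime_t utime, stime;
 *
 *	task_cputime(current, &utime, &stime);
 */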
2209 
2210 /*
2211  * Per process flags
2212  */
2213 #define PF_EXITING	0x00000004	/* getting shut down */
2214 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
2215 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
2216 #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
2217 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
2218 #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
2219 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
2220 #define PF_DUMPCORE	0x00000200	/* dumped core */
2221 #define PF_SIGNALED	0x00000400	/* killed by a signal */
2222 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
2223 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
2224 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
2225 #define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
2226 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
2227 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
2228 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
2229 #define PF_KSWAPD	0x00040000	/* I am kswapd */
2230 #define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
2231 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
2232 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
2233 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
2234 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
2235 #define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
2236 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
2237 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
2238 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
2239 #define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
2240 
2241 /*
2242  * Only the _current_ task can read/write to tsk->flags, but other
2243  * tasks can access tsk->flags in readonly mode for example
2244  * with tsk_used_math (like during threaded core dumping).
2245  * There is however an exception to this rule during ptrace
2246  * or during fork: the ptracer task is allowed to write to the
2247  * child->flags of its traced child (same goes for fork, the parent
2248  * can write to the child->flags), because we're guaranteed the
2249  * child is not running and in turn not changing child->flags
2250  * at the same time the parent does it.
2251  */
2252 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
2253 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
2254 #define clear_used_math() clear_stopped_child_used_math(current)
2255 #define set_used_math() set_stopped_child_used_math(current)
2256 #define conditional_stopped_child_used_math(condition, child) \
2257 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
2258 #define conditional_used_math(condition) \
2259 	conditional_stopped_child_used_math(condition, current)
2260 #define copy_to_stopped_child_used_math(child) \
2261 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
2262 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
2263 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
2264 #define used_math() tsk_used_math(current)
2265 
2266 /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags;
2267  * __GFP_FS is also cleared as it implies __GFP_IO.
2268  */
2269 static inline gfp_t memalloc_noio_flags(gfp_t flags)
2270 {
2271 	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
2272 		flags &= ~(__GFP_IO | __GFP_FS);
2273 	return flags;
2274 }
2275 
2276 static inline unsigned int memalloc_noio_save(void)
2277 {
2278 	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
2279 	current->flags |= PF_MEMALLOC_NOIO;
2280 	return flags;
2281 }
2282 
2283 static inline void memalloc_noio_restore(unsigned int flags)
2284 {
2285 	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
2286 }
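/*
 * A minimal usage sketch (illustrative): code that must not recurse into
 * I/O (e.g. memory allocation on a block device writeout path) brackets its
 * allocations with memalloc_noio_save()/memalloc_noio_restore(), so that
 * memalloc_noio_flags() strips __GFP_IO/__GFP_FS from allocations done in
 * between:
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	buf = kmalloc(size, GFP_KERNEL);	// effectively degraded to no-I/O
 *	// ...
 *	memalloc_noio_restore(noio_flags);
 */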
2287 
2288 /* Per-process atomic flags. */
2289 #define PFA_NO_NEW_PRIVS 0	/* May not gain new privileges. */
2290 #define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
2291 #define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
2292 #define PFA_LMK_WAITING  3      /* Lowmemorykiller is waiting */
2293 
2294 
2295 #define TASK_PFA_TEST(name, func)					\
2296 	static inline bool task_##func(struct task_struct *p)		\
2297 	{ return test_bit(PFA_##name, &p->atomic_flags); }
2298 #define TASK_PFA_SET(name, func)					\
2299 	static inline void task_set_##func(struct task_struct *p)	\
2300 	{ set_bit(PFA_##name, &p->atomic_flags); }
2301 #define TASK_PFA_CLEAR(name, func)					\
2302 	static inline void task_clear_##func(struct task_struct *p)	\
2303 	{ clear_bit(PFA_##name, &p->atomic_flags); }
2304 
2305 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
2306 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
2307 
2308 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
2309 TASK_PFA_SET(SPREAD_PAGE, spread_page)
2310 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
2311 
2312 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2313 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2314 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
2315 
2316 TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
2317 TASK_PFA_SET(LMK_WAITING, lmk_waiting)
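/*
 * Illustrative note on the macros above: each TASK_PFA_*() invocation
 * expands to a small inline helper operating on p->atomic_flags, e.g. the
 * NO_NEW_PRIVS pair generates:
 *
 *	bool task_no_new_privs(struct task_struct *p);		// test the bit
 *	void task_set_no_new_privs(struct task_struct *p);	// set the bit
 *
 * so callers write task_set_no_new_privs(current) rather than touching
 * the flag word directly.
 */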
2318 
2319 /*
2320  * task->jobctl flags
2321  */
2322 #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
2323 
2324 #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
2325 #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
2326 #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
2327 #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
2328 #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
2329 #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
2330 #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
2331 
2332 #define JOBCTL_STOP_DEQUEUED	(1UL << JOBCTL_STOP_DEQUEUED_BIT)
2333 #define JOBCTL_STOP_PENDING	(1UL << JOBCTL_STOP_PENDING_BIT)
2334 #define JOBCTL_STOP_CONSUME	(1UL << JOBCTL_STOP_CONSUME_BIT)
2335 #define JOBCTL_TRAP_STOP	(1UL << JOBCTL_TRAP_STOP_BIT)
2336 #define JOBCTL_TRAP_NOTIFY	(1UL << JOBCTL_TRAP_NOTIFY_BIT)
2337 #define JOBCTL_TRAPPING		(1UL << JOBCTL_TRAPPING_BIT)
2338 #define JOBCTL_LISTENING	(1UL << JOBCTL_LISTENING_BIT)
2339 
2340 #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
2341 #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
2342 
2343 extern bool task_set_jobctl_pending(struct task_struct *task,
2344 				    unsigned long mask);
2345 extern void task_clear_jobctl_trapping(struct task_struct *task);
2346 extern void task_clear_jobctl_pending(struct task_struct *task,
2347 				      unsigned long mask);
2348 
2349 static inline void rcu_copy_process(struct task_struct *p)
2350 {
2351 #ifdef CONFIG_PREEMPT_RCU
2352 	p->rcu_read_lock_nesting = 0;
2353 	p->rcu_read_unlock_special.s = 0;
2354 	p->rcu_blocked_node = NULL;
2355 	INIT_LIST_HEAD(&p->rcu_node_entry);
2356 #endif /* #ifdef CONFIG_PREEMPT_RCU */
2357 #ifdef CONFIG_TASKS_RCU
2358 	p->rcu_tasks_holdout = false;
2359 	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2360 	p->rcu_tasks_idle_cpu = -1;
2361 #endif /* #ifdef CONFIG_TASKS_RCU */
2362 }
2363 
2364 static inline void tsk_restore_flags(struct task_struct *task,
2365 				unsigned long orig_flags, unsigned long flags)
2366 {
2367 	task->flags &= ~flags;
2368 	task->flags |= orig_flags & flags;
2369 }
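/*
 * A minimal usage sketch (illustrative): tsk_restore_flags() restores only
 * the bits named in its last argument, which makes it suitable for
 * temporarily setting a per-process flag and later putting back whatever
 * value that flag had before:
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC;
 *	// ... allocation that may dip into memory reserves ...
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 */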
2370 
2371 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2372 				     const struct cpumask *trial);
2373 extern int task_can_attach(struct task_struct *p,
2374 			   const struct cpumask *cs_cpus_allowed);
2375 #ifdef CONFIG_SMP
2376 extern void do_set_cpus_allowed(struct task_struct *p,
2377 			       const struct cpumask *new_mask);
2378 
2379 extern int set_cpus_allowed_ptr(struct task_struct *p,
2380 				const struct cpumask *new_mask);
2381 #else
2382 static inline void do_set_cpus_allowed(struct task_struct *p,
2383 				      const struct cpumask *new_mask)
2384 {
2385 }
2386 static inline int set_cpus_allowed_ptr(struct task_struct *p,
2387 				       const struct cpumask *new_mask)
2388 {
2389 	if (!cpumask_test_cpu(0, new_mask))
2390 		return -EINVAL;
2391 	return 0;
2392 }
2393 #endif
2394 
2395 #ifdef CONFIG_NO_HZ_COMMON
2396 void calc_load_enter_idle(void);
2397 void calc_load_exit_idle(void);
2398 #else
2399 static inline void calc_load_enter_idle(void) { }
2400 static inline void calc_load_exit_idle(void) { }
2401 #endif /* CONFIG_NO_HZ_COMMON */
2402 
2403 /*
2404  * Do not use outside of architecture code which knows its limitations.
2405  *
2406  * sched_clock() has no promise of monotonicity or bounded drift between
2407  * CPUs; using it (which you should not) requires disabling IRQs.
2408  *
2409  * Please use one of the three interfaces below.
2410  */
2411 extern unsigned long long notrace sched_clock(void);
2412 /*
2413  * See the comment in kernel/sched/clock.c
2414  */
2415 extern u64 running_clock(void);
2416 extern u64 sched_clock_cpu(int cpu);
2417 
2418 
2419 extern void sched_clock_init(void);
2420 
2421 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2422 static inline void sched_clock_tick(void)
2423 {
2424 }
2425 
2426 static inline void sched_clock_idle_sleep_event(void)
2427 {
2428 }
2429 
2430 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2431 {
2432 }
2433 
2434 static inline u64 cpu_clock(int cpu)
2435 {
2436 	return sched_clock();
2437 }
2438 
2439 static inline u64 local_clock(void)
2440 {
2441 	return sched_clock();
2442 }
2443 #else
2444 /*
2445  * Architectures can set this to 1 if they have specified
2446  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
2447  * but then during bootup it turns out that sched_clock()
2448  * is reliable after all:
2449  */
2450 extern int sched_clock_stable(void);
2451 extern void set_sched_clock_stable(void);
2452 extern void clear_sched_clock_stable(void);
2453 
2454 extern void sched_clock_tick(void);
2455 extern void sched_clock_idle_sleep_event(void);
2456 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2457 
2458 /*
2459  * As outlined in clock.c, provides a fast, high resolution, nanosecond
2460  * time source that is monotonic for a given cpu argument and has bounded
2461  * drift between CPUs.
2462  *
2463  * ######################### BIG FAT WARNING ##########################
2464  * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
2465  * # go backwards !!                                                  #
2466  * ####################################################################
2467  */
2468 static inline u64 cpu_clock(int cpu)
2469 {
2470 	return sched_clock_cpu(cpu);
2471 }
2472 
2473 static inline u64 local_clock(void)
2474 {
2475 	return sched_clock_cpu(raw_smp_processor_id());
2476 }
2477 #endif
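/*
 * A minimal usage sketch (illustrative): local_clock() is the usual choice
 * for cheap duration measurements; the result is in nanoseconds and, per
 * the warning above, readings are only safely comparable on the same CPU:
 *
 *	u64 t0 = local_clock();
 *	// ... work being timed ...
 *	u64 delta_ns = local_clock() - t0;
 */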
2478 
2479 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2480 /*
2481  * An interface for runtime opt-in to IRQ time accounting, based on sched_clock.
2482  * The reason for this explicit opt-in is to avoid a performance penalty with
2483  * slow sched_clocks.
2484  */
2485 extern void enable_sched_clock_irqtime(void);
2486 extern void disable_sched_clock_irqtime(void);
2487 #else
2488 static inline void enable_sched_clock_irqtime(void) {}
2489 static inline void disable_sched_clock_irqtime(void) {}
2490 #endif
2491 
2492 extern unsigned long long
2493 task_sched_runtime(struct task_struct *task);
2494 
2495 /* sched_exec is called by processes performing an exec */
2496 #ifdef CONFIG_SMP
2497 extern void sched_exec(void);
2498 #else
2499 #define sched_exec()   {}
2500 #endif
2501 
2502 extern void sched_clock_idle_sleep_event(void);
2503 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2504 
2505 #ifdef CONFIG_HOTPLUG_CPU
2506 extern void idle_task_exit(void);
2507 #else
2508 static inline void idle_task_exit(void) {}
2509 #endif
2510 
2511 #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2512 extern void wake_up_nohz_cpu(int cpu);
2513 #else
2514 static inline void wake_up_nohz_cpu(int cpu) { }
2515 #endif
2516 
2517 #ifdef CONFIG_NO_HZ_FULL
2518 extern u64 scheduler_tick_max_deferment(void);
2519 #endif
2520 
2521 #ifdef CONFIG_SCHED_AUTOGROUP
2522 extern void sched_autogroup_create_attach(struct task_struct *p);
2523 extern void sched_autogroup_detach(struct task_struct *p);
2524 extern void sched_autogroup_fork(struct signal_struct *sig);
2525 extern void sched_autogroup_exit(struct signal_struct *sig);
2526 #ifdef CONFIG_PROC_FS
2527 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2528 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2529 #endif
2530 #else
2531 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2532 static inline void sched_autogroup_detach(struct task_struct *p) { }
2533 static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2534 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2535 #endif
2536 
2537 extern int yield_to(struct task_struct *p, bool preempt);
2538 extern void set_user_nice(struct task_struct *p, long nice);
2539 extern int task_prio(const struct task_struct *p);
2540 /**
2541  * task_nice - return the nice value of a given task.
2542  * @p: the task in question.
2543  *
2544  * Return: The nice value [ -20 ... 0 ... 19 ].
2545  */
2546 static inline int task_nice(const struct task_struct *p)
2547 {
2548 	return PRIO_TO_NICE((p)->static_prio);
2549 }
2550 extern int can_nice(const struct task_struct *p, const int nice);
2551 extern int task_curr(const struct task_struct *p);
2552 extern int idle_cpu(int cpu);
2553 extern int sched_setscheduler(struct task_struct *, int,
2554 			      const struct sched_param *);
2555 extern int sched_setscheduler_nocheck(struct task_struct *, int,
2556 				      const struct sched_param *);
2557 extern int sched_setattr(struct task_struct *,
2558 			 const struct sched_attr *);
2559 extern struct task_struct *idle_task(int cpu);
2560 /**
2561  * is_idle_task - is the specified task an idle task?
2562  * @p: the task in question.
2563  *
2564  * Return: 1 if @p is an idle task. 0 otherwise.
2565  */
2566 static inline bool is_idle_task(const struct task_struct *p)
2567 {
2568 	return p->pid == 0;
2569 }
2570 extern struct task_struct *curr_task(int cpu);
2571 extern void set_curr_task(int cpu, struct task_struct *p);
2572 
2573 void yield(void);
2574 
2575 union thread_union {
2576 	struct thread_info thread_info;
2577 	unsigned long stack[THREAD_SIZE/sizeof(long)];
2578 };
2579 
2580 #ifndef __HAVE_ARCH_KSTACK_END
2581 static inline int kstack_end(void *addr)
2582 {
2583 	/* Reliable end of stack detection:
2584 	 * Some APM bios versions misalign the stack
2585 	 */
2586 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2587 }
2588 #endif
2589 
2590 extern union thread_union init_thread_union;
2591 extern struct task_struct init_task;
2592 
2593 extern struct   mm_struct init_mm;
2594 
2595 extern struct pid_namespace init_pid_ns;
2596 
2597 /*
2598  * find a task by one of its numerical ids
2599  *
2600  * find_task_by_pid_ns():
2601  *      finds a task by its pid in the specified namespace
2602  * find_task_by_vpid():
2603  *      finds a task by its virtual pid
2604  *
2605  * see also find_vpid() etc in include/linux/pid.h
2606  */
2607 
2608 extern struct task_struct *find_task_by_vpid(pid_t nr);
2609 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2610 		struct pid_namespace *ns);
2611 
2612 /* per-UID process charging. */
2613 extern struct user_struct * alloc_uid(kuid_t);
2614 static inline struct user_struct *get_uid(struct user_struct *u)
2615 {
2616 	atomic_inc(&u->__count);
2617 	return u;
2618 }
2619 extern void free_uid(struct user_struct *);
2620 
2621 #include <asm/current.h>
2622 
2623 extern void xtime_update(unsigned long ticks);
2624 
2625 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2626 extern int wake_up_process(struct task_struct *tsk);
2627 extern void wake_up_new_task(struct task_struct *tsk);
2628 #ifdef CONFIG_SMP
2629  extern void kick_process(struct task_struct *tsk);
2630 #else
2631  static inline void kick_process(struct task_struct *tsk) { }
2632 #endif
2633 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2634 extern void sched_dead(struct task_struct *p);
2635 
2636 extern void proc_caches_init(void);
2637 extern void flush_signals(struct task_struct *);
2638 extern void ignore_signals(struct task_struct *);
2639 extern void flush_signal_handlers(struct task_struct *, int force_default);
2640 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2641 
2642 static inline int kernel_dequeue_signal(siginfo_t *info)
2643 {
2644 	struct task_struct *tsk = current;
2645 	siginfo_t __info;
2646 	int ret;
2647 
2648 	spin_lock_irq(&tsk->sighand->siglock);
2649 	ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
2650 	spin_unlock_irq(&tsk->sighand->siglock);
2651 
2652 	return ret;
2653 }
2654 
2655 static inline void kernel_signal_stop(void)
2656 {
2657 	spin_lock_irq(&current->sighand->siglock);
2658 	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
2659 		__set_current_state(TASK_STOPPED);
2660 	spin_unlock_irq(&current->sighand->siglock);
2661 
2662 	schedule();
2663 }
2664 
2665 extern void release_task(struct task_struct * p);
2666 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2667 extern int force_sigsegv(int, struct task_struct *);
2668 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2669 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2670 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2671 extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2672 				const struct cred *, u32);
2673 extern int kill_pgrp(struct pid *pid, int sig, int priv);
2674 extern int kill_pid(struct pid *pid, int sig, int priv);
2675 extern int kill_proc_info(int, struct siginfo *, pid_t);
2676 extern __must_check bool do_notify_parent(struct task_struct *, int);
2677 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2678 extern void force_sig(int, struct task_struct *);
2679 extern int send_sig(int, struct task_struct *, int);
2680 extern int zap_other_threads(struct task_struct *p);
2681 extern struct sigqueue *sigqueue_alloc(void);
2682 extern void sigqueue_free(struct sigqueue *);
2683 extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2684 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2685 
2686 #ifdef TIF_RESTORE_SIGMASK
2687 /*
2688  * Legacy restore_sigmask accessors.  These are inefficient on
2689  * SMP architectures because they require atomic operations.
2690  */
2691 
2692 /**
2693  * set_restore_sigmask() - make sure saved_sigmask processing gets done
2694  *
2695  * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
2696  * will run before returning to user mode, to process the flag.  For
2697  * all callers, TIF_SIGPENDING is already set or it's no harm to set
2698  * it.  TIF_RESTORE_SIGMASK need not be in the set of bits that the
2699  * arch code will notice on return to user mode, in case those bits
2700  * are scarce.  We set TIF_SIGPENDING here to ensure that the arch
2701  * signal code always gets run when TIF_RESTORE_SIGMASK is set.
2702  */
2703 static inline void set_restore_sigmask(void)
2704 {
2705 	set_thread_flag(TIF_RESTORE_SIGMASK);
2706 	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
2707 }
2708 static inline void clear_restore_sigmask(void)
2709 {
2710 	clear_thread_flag(TIF_RESTORE_SIGMASK);
2711 }
2712 static inline bool test_restore_sigmask(void)
2713 {
2714 	return test_thread_flag(TIF_RESTORE_SIGMASK);
2715 }
2716 static inline bool test_and_clear_restore_sigmask(void)
2717 {
2718 	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
2719 }
2720 
2721 #else	/* TIF_RESTORE_SIGMASK */
2722 
2723 /* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
2724 static inline void set_restore_sigmask(void)
2725 {
2726 	current->restore_sigmask = true;
2727 	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
2728 }
2729 static inline void clear_restore_sigmask(void)
2730 {
2731 	current->restore_sigmask = false;
2732 }
2733 static inline bool test_restore_sigmask(void)
2734 {
2735 	return current->restore_sigmask;
2736 }
2737 static inline bool test_and_clear_restore_sigmask(void)
2738 {
2739 	if (!current->restore_sigmask)
2740 		return false;
2741 	current->restore_sigmask = false;
2742 	return true;
2743 }
2744 #endif
2745 
2746 static inline void restore_saved_sigmask(void)
2747 {
2748 	if (test_and_clear_restore_sigmask())
2749 		__set_current_blocked(&current->saved_sigmask);
2750 }
2751 
2752 static inline sigset_t *sigmask_to_save(void)
2753 {
2754 	sigset_t *res = &current->blocked;
2755 	if (unlikely(test_restore_sigmask()))
2756 		res = &current->saved_sigmask;
2757 	return res;
2758 }
2759 
2760 static inline int kill_cad_pid(int sig, int priv)
2761 {
2762 	return kill_pid(cad_pid, sig, priv);
2763 }
2764 
2765 /* These can be the second arg to send_sig_info/send_group_sig_info.  */
2766 #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2767 #define SEND_SIG_PRIV	((struct siginfo *) 1)
2768 #define SEND_SIG_FORCED	((struct siginfo *) 2)
2769 
2770 /*
2771  * True if we are on the alternate signal stack.
2772  */
2773 static inline int on_sig_stack(unsigned long sp)
2774 {
2775 	/*
2776 	 * If the signal stack is SS_AUTODISARM then, by construction, we
2777 	 * can't be on the signal stack unless user code deliberately set
2778 	 * SS_AUTODISARM when we were already on it.
2779 	 *
2780 	 * This improves reliability: if user state gets corrupted such that
2781 	 * the stack pointer points very close to the end of the signal stack,
2782 	 * then this check will enable the signal to be handled anyway.
2783 	 */
2784 	if (current->sas_ss_flags & SS_AUTODISARM)
2785 		return 0;
2786 
2787 #ifdef CONFIG_STACK_GROWSUP
2788 	return sp >= current->sas_ss_sp &&
2789 		sp - current->sas_ss_sp < current->sas_ss_size;
2790 #else
2791 	return sp > current->sas_ss_sp &&
2792 		sp - current->sas_ss_sp <= current->sas_ss_size;
2793 #endif
2794 }
2795 
2796 static inline int sas_ss_flags(unsigned long sp)
2797 {
2798 	if (!current->sas_ss_size)
2799 		return SS_DISABLE;
2800 
2801 	return on_sig_stack(sp) ? SS_ONSTACK : 0;
2802 }
2803 
2804 static inline void sas_ss_reset(struct task_struct *p)
2805 {
2806 	p->sas_ss_sp = 0;
2807 	p->sas_ss_size = 0;
2808 	p->sas_ss_flags = SS_DISABLE;
2809 }
2810 
2811 static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2812 {
2813 	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2814 #ifdef CONFIG_STACK_GROWSUP
2815 		return current->sas_ss_sp;
2816 #else
2817 		return current->sas_ss_sp + current->sas_ss_size;
2818 #endif
2819 	return sp;
2820 }
2821 
2822 /*
2823  * Routines for handling mm_structs
2824  */
2825 extern struct mm_struct * mm_alloc(void);
2826 
2827 /* mmdrop drops the mm and the page tables */
2828 extern void __mmdrop(struct mm_struct *);
2829 static inline void mmdrop(struct mm_struct *mm)
2830 {
2831 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2832 		__mmdrop(mm);
2833 }
2834 
2835 static inline bool mmget_not_zero(struct mm_struct *mm)
2836 {
2837 	return atomic_inc_not_zero(&mm->mm_users);
2838 }
2839 
2840 /* mmput gets rid of the mappings and all user-space */
2841 extern void mmput(struct mm_struct *);
2842 #ifdef CONFIG_MMU
2843 /* Same as above, but performs the slow path asynchronously. Can
2844  * be called from atomic context as well.
2845  */
2846 extern void mmput_async(struct mm_struct *);
2847 #endif
2848 
2849 /* Grab a reference to a task's mm, if it is not already going away */
2850 extern struct mm_struct *get_task_mm(struct task_struct *task);
2851 /*
2852  * Grab a reference to a task's mm, if it is not already going away
2853  * and ptrace_may_access with the mode parameter passed to it
2854  * succeeds.
2855  */
2856 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2857 /* Remove the current task's stale references to the old mm_struct */
2858 extern void mm_release(struct task_struct *, struct mm_struct *);
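/*
 * A minimal usage sketch (illustrative): the canonical way to inspect
 * another task's address space is to take an mm_users reference with
 * get_task_mm() and drop it with mmput() when done:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		// ... mm cannot be torn down here ...
 *		mmput(mm);
 *	}
 */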
2859 
2860 #ifdef CONFIG_HAVE_COPY_THREAD_TLS
2861 extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
2862 			struct task_struct *, unsigned long);
2863 #else
2864 extern int copy_thread(unsigned long, unsigned long, unsigned long,
2865 			struct task_struct *);
2866 
2867 /* Architectures that haven't opted into copy_thread_tls get the tls argument
2868  * via pt_regs, so ignore the tls argument passed via C. */
2869 static inline int copy_thread_tls(
2870 		unsigned long clone_flags, unsigned long sp, unsigned long arg,
2871 		struct task_struct *p, unsigned long tls)
2872 {
2873 	return copy_thread(clone_flags, sp, arg, p);
2874 }
2875 #endif
2876 extern void flush_thread(void);
2877 
2878 #ifdef CONFIG_HAVE_EXIT_THREAD
2879 extern void exit_thread(struct task_struct *tsk);
2880 #else
2881 static inline void exit_thread(struct task_struct *tsk)
2882 {
2883 }
2884 #endif
2885 
2886 extern void exit_files(struct task_struct *);
2887 extern void __cleanup_sighand(struct sighand_struct *);
2888 
2889 extern void exit_itimers(struct signal_struct *);
2890 extern void flush_itimer_signals(void);
2891 
2892 extern void do_group_exit(int);
2893 
2894 extern int do_execve(struct filename *,
2895 		     const char __user * const __user *,
2896 		     const char __user * const __user *);
2897 extern int do_execveat(int, struct filename *,
2898 		       const char __user * const __user *,
2899 		       const char __user * const __user *,
2900 		       int);
2901 extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
2902 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2903 struct task_struct *fork_idle(int);
2904 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2905 
2906 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
2907 static inline void set_task_comm(struct task_struct *tsk, const char *from)
2908 {
2909 	__set_task_comm(tsk, from, false);
2910 }
2911 extern char *get_task_comm(char *to, struct task_struct *tsk);
2912 
2913 #ifdef CONFIG_SMP
2914 void scheduler_ipi(void);
2915 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2916 #else
2917 static inline void scheduler_ipi(void) { }
2918 static inline unsigned long wait_task_inactive(struct task_struct *p,
2919 					       long match_state)
2920 {
2921 	return 1;
2922 }
2923 #endif
2924 
2925 #define tasklist_empty() \
2926 	list_empty(&init_task.tasks)
2927 
2928 #define next_task(p) \
2929 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2930 
2931 #define for_each_process(p) \
2932 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2933 
2934 extern bool current_is_single_threaded(void);
2935 
2936 /*
2937  * Careful: do_each_thread/while_each_thread is a double loop so
2938  *          'break' will not work as expected - use goto instead.
2939  */
2940 #define do_each_thread(g, t) \
2941 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2942 
2943 #define while_each_thread(g, t) \
2944 	while ((t = next_thread(t)) != g)
2945 
2946 #define __for_each_thread(signal, t)	\
2947 	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2948 
2949 #define for_each_thread(p, t)		\
2950 	__for_each_thread((p)->signal, t)
2951 
2952 /* Careful: this is a double loop, 'break' won't work as expected. */
2953 #define for_each_process_thread(p, t)	\
2954 	for_each_process(p) for_each_thread(p, t)
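/*
 * A minimal usage sketch (illustrative): the task-list iterators above walk
 * RCU-protected lists, so readers typically hold rcu_read_lock() (or
 * read_lock(&tasklist_lock)) around the walk; inspect() below is a
 * hypothetical per-thread callback:
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		inspect(t);
 *	rcu_read_unlock();
 */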
2955 
2956 static inline int get_nr_threads(struct task_struct *tsk)
2957 {
2958 	return tsk->signal->nr_threads;
2959 }
2960 
2961 static inline bool thread_group_leader(struct task_struct *p)
2962 {
2963 	return p->exit_signal >= 0;
2964 }
2965 
2966 /* Due to the insanities of de_thread it is possible for a process
2967  * to have the pid of the thread group leader without actually being
2968  * the thread group leader.  For iteration through the pids in proc
2969  * all we care about is that we have a task with the appropriate
2970  * pid, we don't actually care if we have the right task.
2971  */
2972 static inline bool has_group_leader_pid(struct task_struct *p)
2973 {
2974 	return task_pid(p) == p->signal->leader_pid;
2975 }
2976 
2977 static inline
2978 bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2979 {
2980 	return p1->signal == p2->signal;
2981 }
2982 
2983 static inline struct task_struct *next_thread(const struct task_struct *p)
2984 {
2985 	return list_entry_rcu(p->thread_group.next,
2986 			      struct task_struct, thread_group);
2987 }
2988 
2989 static inline int thread_group_empty(struct task_struct *p)
2990 {
2991 	return list_empty(&p->thread_group);
2992 }
2993 
2994 #define delay_group_leader(p) \
2995 		(thread_group_leader(p) && !thread_group_empty(p))
2996 
2997 /*
2998  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2999  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
3000  * pins the final release of task.io_context.  Also protects ->cpuset and
3001  * ->cgroup.subsys[]. And ->vfork_done.
3002  *
3003  * Nests both inside and outside of read_lock(&tasklist_lock).
3004  * It must not be nested with write_lock_irq(&tasklist_lock),
3005  * neither inside nor outside.
3006  */
3007 static inline void task_lock(struct task_struct *p)
3008 {
3009 	spin_lock(&p->alloc_lock);
3010 }
3011 
3012 static inline void task_unlock(struct task_struct *p)
3013 {
3014 	spin_unlock(&p->alloc_lock);
3015 }
3016 
3017 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
3018 							unsigned long *flags);
3019 
3020 static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
3021 						       unsigned long *flags)
3022 {
3023 	struct sighand_struct *ret;
3024 
3025 	ret = __lock_task_sighand(tsk, flags);
3026 	(void)__cond_lock(&tsk->sighand->siglock, ret);
3027 	return ret;
3028 }
3029 
3030 static inline void unlock_task_sighand(struct task_struct *tsk,
3031 						unsigned long *flags)
3032 {
3033 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
3034 }
3035 
3036 /**
3037  * threadgroup_change_begin - mark the beginning of changes to a threadgroup
3038  * @tsk: task causing the changes
3039  *
3040  * All operations which modify a threadgroup - a new thread joining the
3041  * group, death of a member thread (the assertion of PF_EXITING) and
3042  * exec(2) dethreading the process and replacing the leader - are wrapped
3043  * by threadgroup_change_{begin|end}().  This is to provide a place which
3044  * subsystems needing threadgroup stability can hook into for
3045  * synchronization.
3046  */
3047 static inline void threadgroup_change_begin(struct task_struct *tsk)
3048 {
3049 	might_sleep();
3050 	cgroup_threadgroup_change_begin(tsk);
3051 }
3052 
3053 /**
3054  * threadgroup_change_end - mark the end of changes to a threadgroup
3055  * @tsk: task causing the changes
3056  *
3057  * See threadgroup_change_begin().
3058  */
3059 static inline void threadgroup_change_end(struct task_struct *tsk)
3060 {
3061 	cgroup_threadgroup_change_end(tsk);
3062 }
3063 
3064 #ifndef __HAVE_THREAD_FUNCTIONS
3065 
3066 #define task_thread_info(task)	((struct thread_info *)(task)->stack)
3067 #define task_stack_page(task)	((task)->stack)
3068 
3069 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
3070 {
3071 	*task_thread_info(p) = *task_thread_info(org);
3072 	task_thread_info(p)->task = p;
3073 }
3074 
3075 /*
3076  * Return the address of the last usable long on the stack.
3077  *
3078  * When the stack grows down, this is just above the thread
3079  * info struct. Going any lower will corrupt the threadinfo.
3080  *
3081  * When the stack grows up, this is the highest address.
3082  * Beyond that position, we corrupt data on the next page.
3083  */
3084 static inline unsigned long *end_of_stack(struct task_struct *p)
3085 {
3086 #ifdef CONFIG_STACK_GROWSUP
3087 	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
3088 #else
3089 	return (unsigned long *)(task_thread_info(p) + 1);
3090 #endif
3091 }
3092 
3093 #endif
3094 #define task_stack_end_corrupted(task) \
3095 		(*(end_of_stack(task)) != STACK_END_MAGIC)
3096 
3097 static inline int object_is_on_stack(void *obj)
3098 {
3099 	void *stack = task_stack_page(current);
3100 
3101 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
3102 }
3103 
3104 extern void thread_stack_cache_init(void);
3105 
3106 #ifdef CONFIG_DEBUG_STACK_USAGE
3107 static inline unsigned long stack_not_used(struct task_struct *p)
3108 {
3109 	unsigned long *n = end_of_stack(p);
3110 
3111 	do { 	/* Skip over canary */
3112 # ifdef CONFIG_STACK_GROWSUP
3113 		n--;
3114 # else
3115 		n++;
3116 # endif
3117 	} while (!*n);
3118 
3119 # ifdef CONFIG_STACK_GROWSUP
3120 	return (unsigned long)end_of_stack(p) - (unsigned long)n;
3121 # else
3122 	return (unsigned long)n - (unsigned long)end_of_stack(p);
3123 # endif
3124 }
3125 #endif
3126 extern void set_task_stack_end_magic(struct task_struct *tsk);
3127 
3128 /* Set thread flags in another task's structure
3129  * - see asm/thread_info.h for the TIF_xxxx flags available
3130  */
3131 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
3132 {
3133 	set_ti_thread_flag(task_thread_info(tsk), flag);
3134 }
3135 
3136 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
3137 {
3138 	clear_ti_thread_flag(task_thread_info(tsk), flag);
3139 }
3140 
3141 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
3142 {
3143 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
3144 }
3145 
3146 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
3147 {
3148 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
3149 }
3150 
3151 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
3152 {
3153 	return test_ti_thread_flag(task_thread_info(tsk), flag);
3154 }
3155 
3156 static inline void set_tsk_need_resched(struct task_struct *tsk)
3157 {
3158 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
3159 }
3160 
3161 static inline void clear_tsk_need_resched(struct task_struct *tsk)
3162 {
3163 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
3164 }
3165 
3166 static inline int test_tsk_need_resched(struct task_struct *tsk)
3167 {
3168 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
3169 }
3170 
3171 static inline int restart_syscall(void)
3172 {
3173 	set_tsk_thread_flag(current, TIF_SIGPENDING);
3174 	return -ERESTARTNOINTR;
3175 }
3176 
3177 static inline int signal_pending(struct task_struct *p)
3178 {
3179 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
3180 }
3181 
3182 static inline int __fatal_signal_pending(struct task_struct *p)
3183 {
3184 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
3185 }
3186 
3187 static inline int fatal_signal_pending(struct task_struct *p)
3188 {
3189 	return signal_pending(p) && __fatal_signal_pending(p);
3190 }
3191 
3192 static inline int signal_pending_state(long state, struct task_struct *p)
3193 {
3194 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
3195 		return 0;
3196 	if (!signal_pending(p))
3197 		return 0;
3198 
3199 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
3200 }
3201 
3202 /*
3203  * cond_resched() and cond_resched_lock(): latency reduction via
3204  * explicit rescheduling in places that are safe. The return
3205  * value indicates whether a reschedule was actually done.
3206  * cond_resched_lock() will drop the spinlock before scheduling,
3207  * cond_resched_softirq() will enable bhs before scheduling.
3208  */
3209 extern int _cond_resched(void);
3210 
3211 #define cond_resched() ({			\
3212 	___might_sleep(__FILE__, __LINE__, 0);	\
3213 	_cond_resched();			\
3214 })
3215 
3216 extern int __cond_resched_lock(spinlock_t *lock);
3217 
3218 #define cond_resched_lock(lock) ({				\
3219 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
3220 	__cond_resched_lock(lock);				\
3221 })
3222 
3223 extern int __cond_resched_softirq(void);
3224 
3225 #define cond_resched_softirq() ({					\
3226 	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
3227 	__cond_resched_softirq();					\
3228 })
3229 
3230 static inline void cond_resched_rcu(void)
3231 {
3232 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
3233 	rcu_read_unlock();
3234 	cond_resched();
3235 	rcu_read_lock();
3236 #endif
3237 }
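/*
 * A minimal usage sketch (illustrative): long-running loops in process
 * context call cond_resched() periodically so other tasks can run even on
 * !CONFIG_PREEMPT kernels; process_item() is a hypothetical per-item step:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */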
3238 
3239 /*
3240  * Does a critical section need to be broken due to another
3241  * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
3242  * but reflects a general need for low latency.)
3243  */
3244 static inline int spin_needbreak(spinlock_t *lock)
3245 {
3246 #ifdef CONFIG_PREEMPT
3247 	return spin_is_contended(lock);
3248 #else
3249 	return 0;
3250 #endif
3251 }
3252 
3253 /*
3254  * Idle thread specific functions to determine the need_resched
3255  * polling state.
3256  */
3257 #ifdef TIF_POLLING_NRFLAG
3258 static inline int tsk_is_polling(struct task_struct *p)
3259 {
3260 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
3261 }
3262 
3263 static inline void __current_set_polling(void)
3264 {
3265 	set_thread_flag(TIF_POLLING_NRFLAG);
3266 }
3267 
3268 static inline bool __must_check current_set_polling_and_test(void)
3269 {
3270 	__current_set_polling();
3271 
3272 	/*
3273 	 * Polling state must be visible before we test NEED_RESCHED,
3274 	 * paired by resched_curr()
3275 	 */
3276 	smp_mb__after_atomic();
3277 
3278 	return unlikely(tif_need_resched());
3279 }
3280 
3281 static inline void __current_clr_polling(void)
3282 {
3283 	clear_thread_flag(TIF_POLLING_NRFLAG);
3284 }
3285 
3286 static inline bool __must_check current_clr_polling_and_test(void)
3287 {
3288 	__current_clr_polling();
3289 
3290 	/*
3291 	 * Polling state must be visible before we test NEED_RESCHED,
3292 	 * paired by resched_curr()
3293 	 */
3294 	smp_mb__after_atomic();
3295 
3296 	return unlikely(tif_need_resched());
3297 }
3298 
3299 #else
3300 static inline int tsk_is_polling(struct task_struct *p) { return 0; }
3301 static inline void __current_set_polling(void) { }
3302 static inline void __current_clr_polling(void) { }
3303 
3304 static inline bool __must_check current_set_polling_and_test(void)
3305 {
3306 	return unlikely(tif_need_resched());
3307 }
3308 static inline bool __must_check current_clr_polling_and_test(void)
3309 {
3310 	return unlikely(tif_need_resched());
3311 }
3312 #endif
3313 
3314 static inline void current_clr_polling(void)
3315 {
3316 	__current_clr_polling();
3317 
3318 	/*
3319 	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
3320 	 * Once the bit is cleared, we'll get IPIs with every new
3321 	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
3322 	 * fold.
3323 	 */
3324 	smp_mb(); /* paired with resched_curr() */
3325 
3326 	preempt_fold_need_resched();
3327 }
3328 
3329 static __always_inline bool need_resched(void)
3330 {
3331 	return unlikely(tif_need_resched());
3332 }
3333 
3334 /*
3335  * Thread group CPU time accounting.
3336  */
3337 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
3338 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
3339 
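/*
 * Illustrative sketch only: summing the CPU time consumed by a whole
 * thread group.  example_group_runtime() is hypothetical.
 */
static inline unsigned long long example_group_runtime(struct task_struct *tsk)
{
	struct task_cputime times;

	thread_group_cputime(tsk, &times);
	return times.sum_exec_runtime;	/* total ns of CPU used by the group */
}
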
3340 /*
3341  * Re-evaluate whether the task has signals pending delivery,
3342  * and wake the task if so.
3343  * This is required every time the blocked sigset_t changes.
3344  * Callers must hold sighand->siglock.
3345  */
3346 extern void recalc_sigpending_and_wake(struct task_struct *t);
3347 extern void recalc_sigpending(void);
3348 
3349 extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
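/*
 * Illustrative sketch only: the locking pattern around a blocked-mask
 * change.  Real code normally goes through helpers such as
 * set_current_blocked(); example_unblock_signal() is hypothetical.
 */
static inline void example_unblock_signal(int sig)
{
	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	recalc_sigpending();	/* the mask changed, so recompute TIF_SIGPENDING */
	spin_unlock_irq(&current->sighand->siglock);
}
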
3350 
3351 static inline void signal_wake_up(struct task_struct *t, bool resume)
3352 {
3353 	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
3354 }
3355 static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
3356 {
3357 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
3358 }
3359 
3360 /*
3361  * Wrappers for p->thread_info->cpu access. No-op on UP.
3362  */
3363 #ifdef CONFIG_SMP
3364 
3365 static inline unsigned int task_cpu(const struct task_struct *p)
3366 {
3367 	return task_thread_info(p)->cpu;
3368 }
3369 
3370 static inline int task_node(const struct task_struct *p)
3371 {
3372 	return cpu_to_node(task_cpu(p));
3373 }
3374 
3375 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
3376 
3377 #else
3378 
3379 static inline unsigned int task_cpu(const struct task_struct *p)
3380 {
3381 	return 0;
3382 }
3383 
3384 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
3385 {
3386 }
3387 
3388 #endif /* CONFIG_SMP */
3389 
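/*
 * Illustrative sketch only: task_cpu() is typically used as a placement
 * hint, e.g. allocating memory on the node the task last ran on.
 * example_alloc_page_near() is hypothetical; on SMP the node lookup is
 * what task_node() does.
 */
static inline struct page *example_alloc_page_near(struct task_struct *p)
{
	return alloc_pages_node(cpu_to_node(task_cpu(p)), GFP_KERNEL, 0);
}
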
3390 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
3391 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
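
/*
 * Illustrative sketch only: pinning the current task to one CPU via the
 * in-kernel affinity interface above.  example_pin_to_cpu() is
 * hypothetical; pid 0 selects the calling task.
 */
static inline long example_pin_to_cpu(unsigned int cpu)
{
	cpumask_var_t mask;
	long ret;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpu, mask);
	ret = sched_setaffinity(0, mask);
	free_cpumask_var(mask);

	return ret;
}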
3392 
3393 #ifdef CONFIG_CGROUP_SCHED
3394 extern struct task_group root_task_group;
3395 #endif /* CONFIG_CGROUP_SCHED */
3396 
3397 extern int task_can_switch_user(struct user_struct *up,
3398 					struct task_struct *tsk);
3399 
3400 #ifdef CONFIG_TASK_XACCT
3401 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3402 {
3403 	tsk->ioac.rchar += amt;
3404 }
3405 
3406 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3407 {
3408 	tsk->ioac.wchar += amt;
3409 }
3410 
3411 static inline void inc_syscr(struct task_struct *tsk)
3412 {
3413 	tsk->ioac.syscr++;
3414 }
3415 
3416 static inline void inc_syscw(struct task_struct *tsk)
3417 {
3418 	tsk->ioac.syscw++;
3419 }
3420 #else
3421 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3422 {
3423 }
3424 
3425 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3426 {
3427 }
3428 
3429 static inline void inc_syscr(struct task_struct *tsk)
3430 {
3431 }
3432 
3433 static inline void inc_syscw(struct task_struct *tsk)
3434 {
3435 }
3436 #endif
3437 
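/*
 * Illustrative sketch only: a read path charging the bytes it copied to
 * the calling task's I/O accounting.  example_account_read() is
 * hypothetical; the helpers compile away when CONFIG_TASK_XACCT is off.
 */
static inline ssize_t example_account_read(ssize_t copied)
{
	if (copied > 0) {
		add_rchar(current, copied);	/* bytes read by this task */
		inc_syscr(current);		/* one more read operation */
	}
	return copied;
}
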
3438 #ifndef TASK_SIZE_OF
3439 #define TASK_SIZE_OF(tsk)	TASK_SIZE
3440 #endif
3441 
3442 #ifdef CONFIG_MEMCG
3443 extern void mm_update_next_owner(struct mm_struct *mm);
3444 #else
3445 static inline void mm_update_next_owner(struct mm_struct *mm)
3446 {
3447 }
3448 #endif /* CONFIG_MEMCG */
3449 
3450 static inline unsigned long task_rlimit(const struct task_struct *tsk,
3451 		unsigned int limit)
3452 {
3453 	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
3454 }
3455 
3456 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
3457 		unsigned int limit)
3458 {
3459 	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
3460 }
3461 
3462 static inline unsigned long rlimit(unsigned int limit)
3463 {
3464 	return task_rlimit(current, limit);
3465 }
3466 
3467 static inline unsigned long rlimit_max(unsigned int limit)
3468 {
3469 	return task_rlimit_max(current, limit);
3470 }
3471 
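/*
 * Illustrative sketch only: checking the current task's address-space
 * limit before growing a mapping.  example_within_as_limit() and its
 * caller are hypothetical.
 */
static inline bool example_within_as_limit(unsigned long new_total_bytes)
{
	unsigned long limit = rlimit(RLIMIT_AS);

	return limit == RLIM_INFINITY || new_total_bytes <= limit;
}
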
3472 #ifdef CONFIG_CPU_FREQ
3473 struct update_util_data {
3474 	void (*func)(struct update_util_data *data,
3475 		     u64 time, unsigned long util, unsigned long max);
3476 };
3477 
3478 void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
3479 			void (*func)(struct update_util_data *data, u64 time,
3480 				     unsigned long util, unsigned long max));
3481 void cpufreq_remove_update_util_hook(int cpu);
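
/*
 * Illustrative sketch only: a governor-style consumer registering the
 * utilization callback for one CPU.  Everything prefixed example_ is
 * hypothetical; only struct update_util_data and the two hook functions
 * above are real.
 */
struct example_gov_cpu {
	struct update_util_data update_util;
	unsigned long last_util;
};

static inline void example_gov_update(struct update_util_data *data, u64 time,
				      unsigned long util, unsigned long max)
{
	struct example_gov_cpu *egc =
		container_of(data, struct example_gov_cpu, update_util);

	/* called from scheduler hot paths; keep this cheap and lock-free */
	egc->last_util = util;
}

static inline void example_gov_start(struct example_gov_cpu *egc, int cpu)
{
	cpufreq_add_update_util_hook(cpu, &egc->update_util, example_gov_update);
}

static inline void example_gov_stop(int cpu)
{
	cpufreq_remove_update_util_hook(cpu);
}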
3482 #endif /* CONFIG_CPU_FREQ */
3483 
3484 #endif
3485