xref: /linux/include/linux/sched.h (revision 990d627f80c3f7b23ca4059ef765617225bccb26)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_SCHED_H
3 #define _LINUX_SCHED_H
4 
5 /*
6  * Define 'struct task_struct' and provide the main scheduler
7  * APIs (schedule(), wakeup variants, etc.)
8  */
9 
10 #include <uapi/linux/sched.h>
11 
12 #include <asm/current.h>
13 
14 #include <linux/pid.h>
15 #include <linux/sem.h>
16 #include <linux/shm.h>
17 #include <linux/mutex.h>
18 #include <linux/plist.h>
19 #include <linux/hrtimer.h>
20 #include <linux/irqflags.h>
21 #include <linux/seccomp.h>
22 #include <linux/nodemask.h>
23 #include <linux/rcupdate.h>
24 #include <linux/refcount.h>
25 #include <linux/resource.h>
26 #include <linux/latencytop.h>
27 #include <linux/sched/prio.h>
28 #include <linux/sched/types.h>
29 #include <linux/signal_types.h>
30 #include <linux/syscall_user_dispatch.h>
31 #include <linux/mm_types_task.h>
32 #include <linux/task_io_accounting.h>
33 #include <linux/posix-timers.h>
34 #include <linux/rseq.h>
35 #include <linux/seqlock.h>
36 #include <linux/kcsan.h>
37 #include <asm/kmap_size.h>
38 
39 /* task_struct member predeclarations (sorted alphabetically): */
40 struct audit_context;
41 struct backing_dev_info;
42 struct bio_list;
43 struct blk_plug;
44 struct bpf_local_storage;
45 struct bpf_run_ctx;
46 struct capture_control;
47 struct cfs_rq;
48 struct fs_struct;
49 struct futex_pi_state;
50 struct io_context;
51 struct io_uring_task;
52 struct mempolicy;
53 struct nameidata;
54 struct nsproxy;
55 struct perf_event_context;
56 struct pid_namespace;
57 struct pipe_inode_info;
58 struct rcu_node;
59 struct reclaim_state;
60 struct robust_list_head;
61 struct root_domain;
62 struct rq;
63 struct sched_attr;
64 struct sched_param;
65 struct seq_file;
66 struct sighand_struct;
67 struct signal_struct;
68 struct task_delay_info;
69 struct task_group;
70 
71 /*
72  * Task state bitmask. NOTE! These bits are also
73  * encoded in fs/proc/array.c: get_task_state().
74  *
75  * We have two separate sets of flags: task->state
76  * is about runnability, while task->exit_state is
77  * about the task exiting. Confusing, but this way
78  * modifying one set can't modify the other one by
79  * mistake.
80  */
81 
82 /* Used in tsk->state: */
83 #define TASK_RUNNING			0x0000
84 #define TASK_INTERRUPTIBLE		0x0001
85 #define TASK_UNINTERRUPTIBLE		0x0002
86 #define __TASK_STOPPED			0x0004
87 #define __TASK_TRACED			0x0008
88 /* Used in tsk->exit_state: */
89 #define EXIT_DEAD			0x0010
90 #define EXIT_ZOMBIE			0x0020
91 #define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
92 /* Used in tsk->state again: */
93 #define TASK_PARKED			0x0040
94 #define TASK_DEAD			0x0080
95 #define TASK_WAKEKILL			0x0100
96 #define TASK_WAKING			0x0200
97 #define TASK_NOLOAD			0x0400
98 #define TASK_NEW			0x0800
99 /* RT specific auxiliary flag to mark RT lock waiters */
100 #define TASK_RTLOCK_WAIT		0x1000
101 #define TASK_STATE_MAX			0x2000
102 
103 /* Convenience macros for the sake of set_current_state: */
104 #define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
105 #define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
106 #define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)
107 
108 #define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
109 
110 /* Convenience macros for the sake of wake_up(): */
111 #define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
112 
113 /* get_task_state(): */
114 #define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
115 					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
116 					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
117 					 TASK_PARKED)
118 
119 #define task_is_running(task)		(READ_ONCE((task)->__state) == TASK_RUNNING)
120 
121 #define task_is_traced(task)		((READ_ONCE(task->__state) & __TASK_TRACED) != 0)
122 
123 #define task_is_stopped(task)		((READ_ONCE(task->__state) & __TASK_STOPPED) != 0)
124 
125 #define task_is_stopped_or_traced(task)	((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
126 
127 /*
128  * Special states are those that do not use the normal wait-loop pattern. See
129  * the comment with set_special_state().
130  */
131 #define is_special_task_state(state)				\
132 	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
133 
134 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
135 # define debug_normal_state_change(state_value)				\
136 	do {								\
137 		WARN_ON_ONCE(is_special_task_state(state_value));	\
138 		current->task_state_change = _THIS_IP_;			\
139 	} while (0)
140 
141 # define debug_special_state_change(state_value)			\
142 	do {								\
143 		WARN_ON_ONCE(!is_special_task_state(state_value));	\
144 		current->task_state_change = _THIS_IP_;			\
145 	} while (0)
146 
147 # define debug_rtlock_wait_set_state()					\
148 	do {								 \
149 		current->saved_state_change = current->task_state_change;\
150 		current->task_state_change = _THIS_IP_;			 \
151 	} while (0)
152 
153 # define debug_rtlock_wait_restore_state()				\
154 	do {								 \
155 		current->task_state_change = current->saved_state_change;\
156 	} while (0)
157 
158 #else
159 # define debug_normal_state_change(cond)	do { } while (0)
160 # define debug_special_state_change(cond)	do { } while (0)
161 # define debug_rtlock_wait_set_state()		do { } while (0)
162 # define debug_rtlock_wait_restore_state()	do { } while (0)
163 #endif
164 
165 /*
166  * set_current_state() includes a barrier so that the write of current->state
167  * is correctly serialised wrt the caller's subsequent test of whether to
168  * actually sleep:
169  *
170  *   for (;;) {
171  *	set_current_state(TASK_UNINTERRUPTIBLE);
172  *	if (CONDITION)
173  *	   break;
174  *
175  *	schedule();
176  *   }
177  *   __set_current_state(TASK_RUNNING);
178  *
179  * If the caller does not need such serialisation (because, for instance, the
180  * CONDITION test and condition change and wakeup are under the same lock) then
181  * use __set_current_state().
182  *
183  * The above is typically ordered against the wakeup, which does:
184  *
185  *   CONDITION = 1;
186  *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
187  *
188  * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
189  * accessing p->state.
190  *
191  * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
192  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
193  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
194  *
195  * However, with slightly different timing the wakeup TASK_RUNNING store can
196  * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
197  * a problem either because that will result in one extra go around the loop
198  * and our CONDITION test will save the day.
199  *
200  * Also see the comments of try_to_wake_up().
201  */
202 #define __set_current_state(state_value)				\
203 	do {								\
204 		debug_normal_state_change((state_value));		\
205 		WRITE_ONCE(current->__state, (state_value));		\
206 	} while (0)
207 
208 #define set_current_state(state_value)					\
209 	do {								\
210 		debug_normal_state_change((state_value));		\
211 		smp_store_mb(current->__state, (state_value));		\
212 	} while (0)
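
/*
 * A minimal sleeper/waker sketch of the pattern described above; 'my_flag'
 * and 'my_task' are hypothetical names used only for illustration.
 *
 * Sleeper:
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (READ_ONCE(my_flag))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * Waker:
 *
 *	WRITE_ONCE(my_flag, 1);
 *	wake_up_process(my_task);
 *
 * The smp_store_mb() in set_current_state() pairs with the full barrier
 * try_to_wake_up() executes before reading ->__state: either the sleeper
 * sees 'my_flag' set, or the waker sees the sleeping state and wakes it.
 */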
213 
214 /*
215  * set_special_state() should be used for those states when the blocking task
216  * cannot use the regular condition-based wait-loop. In that case we must
217  * serialize against wakeups such that any possible in-flight TASK_RUNNING
218  * stores will not collide with our state change.
219  */
220 #define set_special_state(state_value)					\
221 	do {								\
222 		unsigned long flags; /* may shadow */			\
223 									\
224 		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
225 		debug_special_state_change((state_value));		\
226 		WRITE_ONCE(current->__state, (state_value));		\
227 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
228 	} while (0)
229 
230 /*
231  * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
232  *
233  * RT's spin/rwlock substitutions are state preserving. The state of the
234  * task when blocking on the lock is saved in task_struct::saved_state and
235  * restored after the lock has been acquired.  These operations are
236  * serialized by task_struct::pi_lock against try_to_wake_up(). Any wakeups
237  * not related to RT locks while the task is blocked on the lock are
238  * redirected to operate on task_struct::saved_state to ensure that these
239  * are not dropped. On restore task_struct::saved_state is set to
240  * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
241  *
242  * The lock operation looks like this:
243  *
244  *	current_save_and_set_rtlock_wait_state();
245  *	for (;;) {
246  *		if (try_lock())
247  *			break;
248  *		raw_spin_unlock_irq(&lock->wait_lock);
249  *		schedule_rtlock();
250  *		raw_spin_lock_irq(&lock->wait_lock);
251  *		set_current_state(TASK_RTLOCK_WAIT);
252  *	}
253  *	current_restore_rtlock_saved_state();
254  */
255 #define current_save_and_set_rtlock_wait_state()			\
256 	do {								\
257 		lockdep_assert_irqs_disabled();				\
258 		raw_spin_lock(&current->pi_lock);			\
259 		current->saved_state = current->__state;		\
260 		debug_rtlock_wait_set_state();				\
261 		WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);		\
262 		raw_spin_unlock(&current->pi_lock);			\
263 	} while (0);
264 
265 #define current_restore_rtlock_saved_state()				\
266 	do {								\
267 		lockdep_assert_irqs_disabled();				\
268 		raw_spin_lock(&current->pi_lock);			\
269 		debug_rtlock_wait_restore_state();			\
270 		WRITE_ONCE(current->__state, current->saved_state);	\
271 		current->saved_state = TASK_RUNNING;			\
272 		raw_spin_unlock(&current->pi_lock);			\
273 	} while (0);
274 
275 #define get_current_state()	READ_ONCE(current->__state)
276 
277 /*
278  * Define the task command name length as an enum so that it is visible to
279  * BPF programs.
280  */
281 enum {
282 	TASK_COMM_LEN = 16,
283 };
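
/*
 * A minimal usage sketch: TASK_COMM_LEN sizes buffers that receive a task's
 * command name, e.g. via get_task_comm() declared further down in this file:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, current);
 *	pr_info("running in %s\n", comm);
 */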
284 
285 extern void scheduler_tick(void);
286 
287 #define	MAX_SCHEDULE_TIMEOUT		LONG_MAX
288 
289 extern long schedule_timeout(long timeout);
290 extern long schedule_timeout_interruptible(long timeout);
291 extern long schedule_timeout_killable(long timeout);
292 extern long schedule_timeout_uninterruptible(long timeout);
293 extern long schedule_timeout_idle(long timeout);
294 asmlinkage void schedule(void);
295 extern void schedule_preempt_disabled(void);
296 asmlinkage void preempt_schedule_irq(void);
297 #ifdef CONFIG_PREEMPT_RT
298  extern void schedule_rtlock(void);
299 #endif
300 
301 extern int __must_check io_schedule_prepare(void);
302 extern void io_schedule_finish(int token);
303 extern long io_schedule_timeout(long timeout);
304 extern void io_schedule(void);
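
/*
 * Sketch of the schedule_timeout() family above: the caller sets the task
 * state first and then sleeps for up to the given number of jiffies, while
 * the schedule_timeout_*() helpers set the state themselves:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *	if (remaining)
 *		...;	// woken early, 'remaining' jiffies were left
 *
 * or, equivalently for this common case:
 *
 *	schedule_timeout_interruptible(msecs_to_jiffies(100));
 */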
305 
306 /**
307  * struct prev_cputime - snapshot of system and user cputime
308  * @utime: time spent in user mode
309  * @stime: time spent in system mode
310  * @lock: protects the above two fields
311  *
312  * Stores previous user/system time values such that we can guarantee
313  * monotonicity.
314  */
315 struct prev_cputime {
316 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
317 	u64				utime;
318 	u64				stime;
319 	raw_spinlock_t			lock;
320 #endif
321 };
322 
323 enum vtime_state {
324 	/* Task is sleeping or running in a CPU with VTIME inactive: */
325 	VTIME_INACTIVE = 0,
326 	/* Task is idle */
327 	VTIME_IDLE,
328 	/* Task runs in kernelspace in a CPU with VTIME active: */
329 	VTIME_SYS,
330 	/* Task runs in userspace in a CPU with VTIME active: */
331 	VTIME_USER,
332 	/* Task runs as a guest in a CPU with VTIME active: */
333 	VTIME_GUEST,
334 };
335 
336 struct vtime {
337 	seqcount_t		seqcount;
338 	unsigned long long	starttime;
339 	enum vtime_state	state;
340 	unsigned int		cpu;
341 	u64			utime;
342 	u64			stime;
343 	u64			gtime;
344 };
345 
346 /*
347  * Utilization clamp constraints.
348  * @UCLAMP_MIN:	Minimum utilization
349  * @UCLAMP_MAX:	Maximum utilization
350  * @UCLAMP_CNT:	Utilization clamp constraints count
351  */
352 enum uclamp_id {
353 	UCLAMP_MIN = 0,
354 	UCLAMP_MAX,
355 	UCLAMP_CNT
356 };
357 
358 #ifdef CONFIG_SMP
359 extern struct root_domain def_root_domain;
360 extern struct mutex sched_domains_mutex;
361 #endif
362 
363 struct sched_info {
364 #ifdef CONFIG_SCHED_INFO
365 	/* Cumulative counters: */
366 
367 	/* # of times we have run on this CPU: */
368 	unsigned long			pcount;
369 
370 	/* Time spent waiting on a runqueue: */
371 	unsigned long long		run_delay;
372 
373 	/* Timestamps: */
374 
375 	/* When did we last run on a CPU? */
376 	unsigned long long		last_arrival;
377 
378 	/* When were we last queued to run? */
379 	unsigned long long		last_queued;
380 
381 #endif /* CONFIG_SCHED_INFO */
382 };
383 
384 /*
385  * Integer metrics need fixed point arithmetic, e.g., sched/fair
386  * has a few: load, load_avg, util_avg, freq, and capacity.
387  *
388  * We define a basic fixed point arithmetic range, and then formalize
389  * all these metrics based on that basic range.
390  */
391 # define SCHED_FIXEDPOINT_SHIFT		10
392 # define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)
393 
394 /* Increase resolution of cpu_capacity calculations */
395 # define SCHED_CAPACITY_SHIFT		SCHED_FIXEDPOINT_SHIFT
396 # define SCHED_CAPACITY_SCALE		(1L << SCHED_CAPACITY_SHIFT)
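
/*
 * Worked example of the fixed point range above: SCHED_CAPACITY_SCALE (1024)
 * represents 100%, so 50% CPU utilization is stored as 512 and converted
 * back with a shift:
 *
 *	util_pct = (util * 100) >> SCHED_CAPACITY_SHIFT;	// 512 -> 50
 */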
397 
398 struct load_weight {
399 	unsigned long			weight;
400 	u32				inv_weight;
401 };
402 
403 /**
404  * struct util_est - Estimated utilization of FAIR tasks
405  * @enqueued: instantaneous estimated utilization of a task/cpu
406  * @ewma:     the Exponential Weighted Moving Average (EWMA)
407  *            utilization of a task
408  *
409  * Support data structure to track an Exponential Weighted Moving Average
410  * (EWMA) of a FAIR task's utilization. New samples are added to the moving
411  * average each time a task completes an activation. The sample weight is chosen
412  * so that the EWMA will be relatively insensitive to transient changes to the
413  * task's workload.
414  *
415  * The enqueued attribute has a slightly different meaning for tasks and cpus:
416  * - task:   the task's util_avg at last task dequeue time
417  * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
418  * Thus, the util_est.enqueued of a task represents its contribution to the
419  * estimated utilization of the CPU where that task is currently enqueued.
420  *
421  * Only for tasks do we track a moving average of the past instantaneous
422  * estimated utilization. This allows absorbing sporadic drops in utilization
423  * of an otherwise almost periodic task.
424  *
425  * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
426  * updates. When a task is dequeued, its util_est should not be updated if its
427  * util_avg has not been updated in the meantime.
428  * This information is mapped into the MSB of util_est.enqueued at dequeue
429  * time. Since the max value of util_est.enqueued for a task is 1024 (the PELT
430  * util_avg for a task), it is safe to use the MSB.
431  */
432 struct util_est {
433 	unsigned int			enqueued;
434 	unsigned int			ewma;
435 #define UTIL_EST_WEIGHT_SHIFT		2
436 #define UTIL_AVG_UNCHANGED		0x80000000
437 } __attribute__((__aligned__(sizeof(u64))));
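
/*
 * Simplified sketch of the EWMA update implied by UTIL_EST_WEIGHT_SHIFT
 * (weight 1/4), ignoring rounding and the UTIL_AVG_UNCHANGED handling; the
 * authoritative implementation is util_est_update() in kernel/sched/fair.c:
 *
 *	ewma += (enqueued - ewma) >> UTIL_EST_WEIGHT_SHIFT;
 *
 * i.e. ewma(t) = ewma(t-1) + 0.25 * (sample - ewma(t-1)).
 */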
438 
439 /*
440  * The load/runnable/util_avg accumulates an infinite geometric series
441  * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
442  *
443  * [load_avg definition]
444  *
445  *   load_avg = runnable% * scale_load_down(load)
446  *
447  * [runnable_avg definition]
448  *
449  *   runnable_avg = runnable% * SCHED_CAPACITY_SCALE
450  *
451  * [util_avg definition]
452  *
453  *   util_avg = running% * SCHED_CAPACITY_SCALE
454  *
455  * where runnable% is the time ratio that a sched_entity is runnable and
456  * running% the time ratio that a sched_entity is running.
457  *
458  * For cfs_rq, they are the aggregated values of all runnable and blocked
459  * sched_entities.
460  *
461  * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
462  * capacity scaling. The scaling is done through the rq_clock_pelt that is used
463  * for computing those signals (see update_rq_clock_pelt())
464  *
465  * N.B., the above ratios (runnable% and running%) themselves are in the
466  * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
467  * to as large a range as necessary. This is for example reflected by
468  * util_avg's SCHED_CAPACITY_SCALE.
469  *
470  * [Overflow issue]
471  *
472  * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
473  * with the highest load (=88761), always runnable on a single cfs_rq,
474  * and should not overflow, as that number already exceeds PID_MAX_LIMIT.
475  *
476  * For all other cases (including 32-bit kernels), struct load_weight's
477  * weight will overflow first before we do, because:
478  *
479  *    Max(load_avg) <= Max(load.weight)
480  *
481  * Then it is the load_weight's responsibility to consider overflow
482  * issues.
483  */
484 struct sched_avg {
485 	u64				last_update_time;
486 	u64				load_sum;
487 	u64				runnable_sum;
488 	u32				util_sum;
489 	u32				period_contrib;
490 	unsigned long			load_avg;
491 	unsigned long			runnable_avg;
492 	unsigned long			util_avg;
493 	struct util_est			util_est;
494 } ____cacheline_aligned;
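
/*
 * Worked example of the definitions above: a sched_entity that was running
 * for 25% of wall time has
 *
 *	util_avg = 0.25 * SCHED_CAPACITY_SCALE = 256
 *
 * and the quoted overflow bound works out as
 * 2^64 / 47742 / 88761 ~= 4.35e9 entities, well above PID_MAX_LIMIT.
 */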
495 
496 struct sched_statistics {
497 #ifdef CONFIG_SCHEDSTATS
498 	u64				wait_start;
499 	u64				wait_max;
500 	u64				wait_count;
501 	u64				wait_sum;
502 	u64				iowait_count;
503 	u64				iowait_sum;
504 
505 	u64				sleep_start;
506 	u64				sleep_max;
507 	s64				sum_sleep_runtime;
508 
509 	u64				block_start;
510 	u64				block_max;
511 	s64				sum_block_runtime;
512 
513 	u64				exec_max;
514 	u64				slice_max;
515 
516 	u64				nr_migrations_cold;
517 	u64				nr_failed_migrations_affine;
518 	u64				nr_failed_migrations_running;
519 	u64				nr_failed_migrations_hot;
520 	u64				nr_forced_migrations;
521 
522 	u64				nr_wakeups;
523 	u64				nr_wakeups_sync;
524 	u64				nr_wakeups_migrate;
525 	u64				nr_wakeups_local;
526 	u64				nr_wakeups_remote;
527 	u64				nr_wakeups_affine;
528 	u64				nr_wakeups_affine_attempts;
529 	u64				nr_wakeups_passive;
530 	u64				nr_wakeups_idle;
531 
532 #ifdef CONFIG_SCHED_CORE
533 	u64				core_forceidle_sum;
534 #endif
535 #endif /* CONFIG_SCHEDSTATS */
536 } ____cacheline_aligned;
537 
538 struct sched_entity {
539 	/* For load-balancing: */
540 	struct load_weight		load;
541 	struct rb_node			run_node;
542 	struct list_head		group_node;
543 	unsigned int			on_rq;
544 
545 	u64				exec_start;
546 	u64				sum_exec_runtime;
547 	u64				vruntime;
548 	u64				prev_sum_exec_runtime;
549 
550 	u64				nr_migrations;
551 
552 #ifdef CONFIG_FAIR_GROUP_SCHED
553 	int				depth;
554 	struct sched_entity		*parent;
555 	/* rq on which this entity is (to be) queued: */
556 	struct cfs_rq			*cfs_rq;
557 	/* rq "owned" by this entity/group: */
558 	struct cfs_rq			*my_q;
559 	/* cached value of my_q->h_nr_running */
560 	unsigned long			runnable_weight;
561 #endif
562 
563 #ifdef CONFIG_SMP
564 	/*
565 	 * Per entity load average tracking.
566 	 *
567 	 * Put into separate cache line so it does not
568 	 * collide with read-mostly values above.
569 	 */
570 	struct sched_avg		avg;
571 #endif
572 };
573 
574 struct sched_rt_entity {
575 	struct list_head		run_list;
576 	unsigned long			timeout;
577 	unsigned long			watchdog_stamp;
578 	unsigned int			time_slice;
579 	unsigned short			on_rq;
580 	unsigned short			on_list;
581 
582 	struct sched_rt_entity		*back;
583 #ifdef CONFIG_RT_GROUP_SCHED
584 	struct sched_rt_entity		*parent;
585 	/* rq on which this entity is (to be) queued: */
586 	struct rt_rq			*rt_rq;
587 	/* rq "owned" by this entity/group: */
588 	struct rt_rq			*my_q;
589 #endif
590 } __randomize_layout;
591 
592 struct sched_dl_entity {
593 	struct rb_node			rb_node;
594 
595 	/*
596 	 * Original scheduling parameters. Copied here from sched_attr
597 	 * during sched_setattr(), they will remain the same until
598 	 * the next sched_setattr().
599 	 */
600 	u64				dl_runtime;	/* Maximum runtime for each instance	*/
601 	u64				dl_deadline;	/* Relative deadline of each instance	*/
602 	u64				dl_period;	/* Separation of two instances (period) */
603 	u64				dl_bw;		/* dl_runtime / dl_period		*/
604 	u64				dl_density;	/* dl_runtime / dl_deadline		*/
605 
606 	/*
607 	 * Actual scheduling parameters. Initialized with the values above,
608 	 * they are continuously updated during task execution. Note that
609 	 * the remaining runtime could be < 0 in case we are in overrun.
610 	 */
611 	s64				runtime;	/* Remaining runtime for this instance	*/
612 	u64				deadline;	/* Absolute deadline for this instance	*/
613 	unsigned int			flags;		/* Specifying the scheduler behaviour	*/
614 
615 	/*
616 	 * Some bool flags:
617 	 *
618 	 * @dl_throttled tells if we exhausted the runtime. If so, the
619 	 * task has to wait for a replenishment to be performed at the
620 	 * next firing of dl_timer.
621 	 *
622 	 * @dl_boosted tells if we are boosted due to DI. If so we are
623 	 * outside bandwidth enforcement mechanism (but only until we
624 	 * exit the critical section);
625 	 *
626 	 * @dl_yielded tells if task gave up the CPU before consuming
627 	 * all its available runtime during the last job.
628 	 *
629 	 * @dl_non_contending tells if the task is inactive while still
630 	 * contributing to the active utilization. In other words, it
631 	 * indicates if the inactive timer has been armed and its handler
632 	 * has not been executed yet. This flag is useful to avoid race
633 	 * conditions between the inactive timer handler and the wakeup
634 	 * code.
635 	 *
636 	 * @dl_overrun tells if the task asked to be informed about runtime
637 	 * overruns.
638 	 */
639 	unsigned int			dl_throttled      : 1;
640 	unsigned int			dl_yielded        : 1;
641 	unsigned int			dl_non_contending : 1;
642 	unsigned int			dl_overrun	  : 1;
643 
644 	/*
645 	 * Bandwidth enforcement timer. Each -deadline task has its
646 	 * own bandwidth to be enforced, thus we need one timer per task.
647 	 */
648 	struct hrtimer			dl_timer;
649 
650 	/*
651 	 * Inactive timer, responsible for decreasing the active utilization
652 	 * at the "0-lag time". When a -deadline task blocks, it contributes
653 	 * to GRUB's active utilization until the "0-lag time", hence a
654 	 * timer is needed to decrease the active utilization at the correct
655 	 * time.
656 	 */
657 	struct hrtimer inactive_timer;
658 
659 #ifdef CONFIG_RT_MUTEXES
660 	/*
661 	 * Priority Inheritance. When a DEADLINE scheduling entity is boosted
662 	 * pi_se points to the donor, otherwise points to the dl_se it belongs
663 	 * to (the original one/itself).
664 	 */
665 	struct sched_dl_entity *pi_se;
666 #endif
667 };
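
/*
 * Worked example for the parameters above: a task admitted via
 * sched_setattr() with sched_runtime = 10ms and sched_deadline =
 * sched_period = 100ms gets
 *
 *	dl_bw      = dl_runtime / dl_period   = 10% of a CPU
 *	dl_density = dl_runtime / dl_deadline = 10%
 *
 * both stored as fixed point fractions rather than floating point values
 * (cf. to_ratio() in the scheduler core).
 */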
668 
669 #ifdef CONFIG_UCLAMP_TASK
670 /* Number of utilization clamp buckets (shorter alias) */
671 #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
672 
673 /*
674  * Utilization clamp for a scheduling entity
675  * @value:		clamp value "assigned" to a se
676  * @bucket_id:		bucket index corresponding to the "assigned" value
677  * @active:		the se is currently refcounted in a rq's bucket
678  * @user_defined:	the requested clamp value comes from user-space
679  *
680  * The bucket_id is the index of the clamp bucket matching the clamp value
681  * which is pre-computed and stored to avoid expensive integer divisions from
682  * the fast path.
683  *
684  * The active bit is set whenever a task has got an "effective" value assigned,
685  * which can be different from the clamp value "requested" from user-space.
686  * This makes it possible to know that a task is refcounted in the rq's
687  * bucket corresponding to the "effective" bucket_id.
688  *
689  * The user_defined bit is set whenever a task has got a task-specific clamp
690  * value requested from userspace, i.e. the system defaults apply to this task
691  * just as a restriction. This allows relaxing default clamps when a less
692  * restrictive task-specific value has been requested, thus making it possible
693  * to implement a "nice" semantic. For example, a task running with a 20%
694  * default boost can still drop its own boosting to 0%.
695  */
696 struct uclamp_se {
697 	unsigned int value		: bits_per(SCHED_CAPACITY_SCALE);
698 	unsigned int bucket_id		: bits_per(UCLAMP_BUCKETS);
699 	unsigned int active		: 1;
700 	unsigned int user_defined	: 1;
701 };
702 #endif /* CONFIG_UCLAMP_TASK */
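
/*
 * Sketch of how a requested clamp value maps to a bucket_id, roughly
 * following uclamp_bucket_id() in kernel/sched/core.c:
 *
 *	delta     = SCHED_CAPACITY_SCALE / UCLAMP_BUCKETS;
 *	bucket_id = min(value / delta, UCLAMP_BUCKETS - 1);
 *
 * e.g. with 5 buckets a requested value of 300 lands in bucket 1.
 */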
703 
704 union rcu_special {
705 	struct {
706 		u8			blocked;
707 		u8			need_qs;
708 		u8			exp_hint; /* Hint for performance. */
709 		u8			need_mb; /* Readers need smp_mb(). */
710 	} b; /* Bits. */
711 	u32 s; /* Set of bits. */
712 };
713 
714 enum perf_event_task_context {
715 	perf_invalid_context = -1,
716 	perf_hw_context = 0,
717 	perf_sw_context,
718 	perf_nr_task_contexts,
719 };
720 
721 struct wake_q_node {
722 	struct wake_q_node *next;
723 };
724 
725 struct kmap_ctrl {
726 #ifdef CONFIG_KMAP_LOCAL
727 	int				idx;
728 	pte_t				pteval[KM_MAX_IDX];
729 #endif
730 };
731 
732 struct task_struct {
733 #ifdef CONFIG_THREAD_INFO_IN_TASK
734 	/*
735 	 * For reasons of header soup (see current_thread_info()), this
736 	 * must be the first element of task_struct.
737 	 */
738 	struct thread_info		thread_info;
739 #endif
740 	unsigned int			__state;
741 
742 #ifdef CONFIG_PREEMPT_RT
743 	/* saved state for "spinlock sleepers" */
744 	unsigned int			saved_state;
745 #endif
746 
747 	/*
748 	 * This begins the randomizable portion of task_struct. Only
749 	 * scheduling-critical items should be added above here.
750 	 */
751 	randomized_struct_fields_start
752 
753 	void				*stack;
754 	refcount_t			usage;
755 	/* Per task flags (PF_*), defined further below: */
756 	unsigned int			flags;
757 	unsigned int			ptrace;
758 
759 #ifdef CONFIG_SMP
760 	int				on_cpu;
761 	struct __call_single_node	wake_entry;
762 	unsigned int			wakee_flips;
763 	unsigned long			wakee_flip_decay_ts;
764 	struct task_struct		*last_wakee;
765 
766 	/*
767 	 * recent_used_cpu is initially set as the last CPU used by a task
768 	 * that wakes affine another task. Waker/wakee relationships can
769 	 * push tasks around a CPU where each wakeup moves to the next one.
770 	 * Tracking a recently used CPU allows a quick search for a recently
771 	 * used CPU that may be idle.
772 	 */
773 	int				recent_used_cpu;
774 	int				wake_cpu;
775 #endif
776 	int				on_rq;
777 
778 	int				prio;
779 	int				static_prio;
780 	int				normal_prio;
781 	unsigned int			rt_priority;
782 
783 	struct sched_entity		se;
784 	struct sched_rt_entity		rt;
785 	struct sched_dl_entity		dl;
786 	const struct sched_class	*sched_class;
787 
788 #ifdef CONFIG_SCHED_CORE
789 	struct rb_node			core_node;
790 	unsigned long			core_cookie;
791 	unsigned int			core_occupation;
792 #endif
793 
794 #ifdef CONFIG_CGROUP_SCHED
795 	struct task_group		*sched_task_group;
796 #endif
797 
798 #ifdef CONFIG_UCLAMP_TASK
799 	/*
800 	 * Clamp values requested for a scheduling entity.
801 	 * Must be updated with task_rq_lock() held.
802 	 */
803 	struct uclamp_se		uclamp_req[UCLAMP_CNT];
804 	/*
805 	 * Effective clamp values used for a scheduling entity.
806 	 * Must be updated with task_rq_lock() held.
807 	 */
808 	struct uclamp_se		uclamp[UCLAMP_CNT];
809 #endif
810 
811 	struct sched_statistics         stats;
812 
813 #ifdef CONFIG_PREEMPT_NOTIFIERS
814 	/* List of struct preempt_notifier: */
815 	struct hlist_head		preempt_notifiers;
816 #endif
817 
818 #ifdef CONFIG_BLK_DEV_IO_TRACE
819 	unsigned int			btrace_seq;
820 #endif
821 
822 	unsigned int			policy;
823 	int				nr_cpus_allowed;
824 	const cpumask_t			*cpus_ptr;
825 	cpumask_t			*user_cpus_ptr;
826 	cpumask_t			cpus_mask;
827 	void				*migration_pending;
828 #ifdef CONFIG_SMP
829 	unsigned short			migration_disabled;
830 #endif
831 	unsigned short			migration_flags;
832 
833 #ifdef CONFIG_PREEMPT_RCU
834 	int				rcu_read_lock_nesting;
835 	union rcu_special		rcu_read_unlock_special;
836 	struct list_head		rcu_node_entry;
837 	struct rcu_node			*rcu_blocked_node;
838 #endif /* #ifdef CONFIG_PREEMPT_RCU */
839 
840 #ifdef CONFIG_TASKS_RCU
841 	unsigned long			rcu_tasks_nvcsw;
842 	u8				rcu_tasks_holdout;
843 	u8				rcu_tasks_idx;
844 	int				rcu_tasks_idle_cpu;
845 	struct list_head		rcu_tasks_holdout_list;
846 #endif /* #ifdef CONFIG_TASKS_RCU */
847 
848 #ifdef CONFIG_TASKS_TRACE_RCU
849 	int				trc_reader_nesting;
850 	int				trc_ipi_to_cpu;
851 	union rcu_special		trc_reader_special;
852 	bool				trc_reader_checked;
853 	struct list_head		trc_holdout_list;
854 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
855 
856 	struct sched_info		sched_info;
857 
858 	struct list_head		tasks;
859 #ifdef CONFIG_SMP
860 	struct plist_node		pushable_tasks;
861 	struct rb_node			pushable_dl_tasks;
862 #endif
863 
864 	struct mm_struct		*mm;
865 	struct mm_struct		*active_mm;
866 
867 	/* Per-thread vma caching: */
868 	struct vmacache			vmacache;
869 
870 #ifdef SPLIT_RSS_COUNTING
871 	struct task_rss_stat		rss_stat;
872 #endif
873 	int				exit_state;
874 	int				exit_code;
875 	int				exit_signal;
876 	/* The signal sent when the parent dies: */
877 	int				pdeath_signal;
878 	/* JOBCTL_*, siglock protected: */
879 	unsigned long			jobctl;
880 
881 	/* Used for emulating ABI behavior of previous Linux versions: */
882 	unsigned int			personality;
883 
884 	/* Scheduler bits, serialized by scheduler locks: */
885 	unsigned			sched_reset_on_fork:1;
886 	unsigned			sched_contributes_to_load:1;
887 	unsigned			sched_migrated:1;
888 #ifdef CONFIG_PSI
889 	unsigned			sched_psi_wake_requeue:1;
890 #endif
891 
892 	/* Force alignment to the next boundary: */
893 	unsigned			:0;
894 
895 	/* Unserialized, strictly 'current' */
896 
897 	/*
898 	 * This field must not be in the scheduler word above due to wakelist
899 	 * queueing no longer being serialized by p->on_cpu. However:
900 	 *
901 	 * p->XXX = X;			ttwu()
902 	 * schedule()			  if (p->on_rq && ..) // false
903 	 *   smp_mb__after_spinlock();	  if (smp_load_acquire(&p->on_cpu) && //true
904 	 *   deactivate_task()		      ttwu_queue_wakelist())
905 	 *     p->on_rq = 0;			p->sched_remote_wakeup = Y;
906 	 *
907 	 * guarantees all stores of 'current' are visible before
908 	 * ->sched_remote_wakeup gets used, so it can be in this word.
909 	 */
910 	unsigned			sched_remote_wakeup:1;
911 
912 	/* Bit to tell LSMs we're in execve(): */
913 	unsigned			in_execve:1;
914 	unsigned			in_iowait:1;
915 #ifndef TIF_RESTORE_SIGMASK
916 	unsigned			restore_sigmask:1;
917 #endif
918 #ifdef CONFIG_MEMCG
919 	unsigned			in_user_fault:1;
920 #endif
921 #ifdef CONFIG_COMPAT_BRK
922 	unsigned			brk_randomized:1;
923 #endif
924 #ifdef CONFIG_CGROUPS
925 	/* disallow userland-initiated cgroup migration */
926 	unsigned			no_cgroup_migration:1;
927 	/* task is frozen/stopped (used by the cgroup freezer) */
928 	unsigned			frozen:1;
929 #endif
930 #ifdef CONFIG_BLK_CGROUP
931 	unsigned			use_memdelay:1;
932 #endif
933 #ifdef CONFIG_PSI
934 	/* Stalled due to lack of memory */
935 	unsigned			in_memstall:1;
936 #endif
937 #ifdef CONFIG_PAGE_OWNER
938 	/* Used by page_owner=on to detect recursion in page tracking. */
939 	unsigned			in_page_owner:1;
940 #endif
941 #ifdef CONFIG_EVENTFD
942 	/* Recursion prevention for eventfd_signal() */
943 	unsigned			in_eventfd_signal:1;
944 #endif
945 
946 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
947 
948 	struct restart_block		restart_block;
949 
950 	pid_t				pid;
951 	pid_t				tgid;
952 
953 #ifdef CONFIG_STACKPROTECTOR
954 	/* Canary value for the -fstack-protector GCC feature: */
955 	unsigned long			stack_canary;
956 #endif
957 	/*
958 	 * Pointers to the (original) parent process, youngest child, younger sibling,
959 	 * older sibling, respectively.  (p->father can be replaced with
960 	 * p->real_parent->pid)
961 	 */
962 
963 	/* Real parent process: */
964 	struct task_struct __rcu	*real_parent;
965 
966 	/* Recipient of SIGCHLD, wait4() reports: */
967 	struct task_struct __rcu	*parent;
968 
969 	/*
970 	 * Children/sibling form the list of natural children:
971 	 */
972 	struct list_head		children;
973 	struct list_head		sibling;
974 	struct task_struct		*group_leader;
975 
976 	/*
977 	 * 'ptraced' is the list of tasks this task is using ptrace() on.
978 	 *
979 	 * This includes both natural children and PTRACE_ATTACH targets.
980 	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
981 	 */
982 	struct list_head		ptraced;
983 	struct list_head		ptrace_entry;
984 
985 	/* PID/PID hash table linkage. */
986 	struct pid			*thread_pid;
987 	struct hlist_node		pid_links[PIDTYPE_MAX];
988 	struct list_head		thread_group;
989 	struct list_head		thread_node;
990 
991 	struct completion		*vfork_done;
992 
993 	/* CLONE_CHILD_SETTID: */
994 	int __user			*set_child_tid;
995 
996 	/* CLONE_CHILD_CLEARTID: */
997 	int __user			*clear_child_tid;
998 
999 	/* PF_KTHREAD | PF_IO_WORKER */
1000 	void				*worker_private;
1001 
1002 	u64				utime;
1003 	u64				stime;
1004 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1005 	u64				utimescaled;
1006 	u64				stimescaled;
1007 #endif
1008 	u64				gtime;
1009 	struct prev_cputime		prev_cputime;
1010 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1011 	struct vtime			vtime;
1012 #endif
1013 
1014 #ifdef CONFIG_NO_HZ_FULL
1015 	atomic_t			tick_dep_mask;
1016 #endif
1017 	/* Context switch counts: */
1018 	unsigned long			nvcsw;
1019 	unsigned long			nivcsw;
1020 
1021 	/* Monotonic time in nsecs: */
1022 	u64				start_time;
1023 
1024 	/* Boot based time in nsecs: */
1025 	u64				start_boottime;
1026 
1027 	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
1028 	unsigned long			min_flt;
1029 	unsigned long			maj_flt;
1030 
1031 	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
1032 	struct posix_cputimers		posix_cputimers;
1033 
1034 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1035 	struct posix_cputimers_work	posix_cputimers_work;
1036 #endif
1037 
1038 	/* Process credentials: */
1039 
1040 	/* Tracer's credentials at attach: */
1041 	const struct cred __rcu		*ptracer_cred;
1042 
1043 	/* Objective and real subjective task credentials (COW): */
1044 	const struct cred __rcu		*real_cred;
1045 
1046 	/* Effective (overridable) subjective task credentials (COW): */
1047 	const struct cred __rcu		*cred;
1048 
1049 #ifdef CONFIG_KEYS
1050 	/* Cached requested key. */
1051 	struct key			*cached_requested_key;
1052 #endif
1053 
1054 	/*
1055 	 * executable name, excluding path.
1056 	 *
1057 	 * - normally initialized setup_new_exec()
1058 	 * - normally initialized by setup_new_exec()
1059 	 * - lock it with task_lock()
1060 	 */
1061 	char				comm[TASK_COMM_LEN];
1062 
1063 	struct nameidata		*nameidata;
1064 
1065 #ifdef CONFIG_SYSVIPC
1066 	struct sysv_sem			sysvsem;
1067 	struct sysv_shm			sysvshm;
1068 #endif
1069 #ifdef CONFIG_DETECT_HUNG_TASK
1070 	unsigned long			last_switch_count;
1071 	unsigned long			last_switch_time;
1072 #endif
1073 	/* Filesystem information: */
1074 	struct fs_struct		*fs;
1075 
1076 	/* Open file information: */
1077 	struct files_struct		*files;
1078 
1079 #ifdef CONFIG_IO_URING
1080 	struct io_uring_task		*io_uring;
1081 #endif
1082 
1083 	/* Namespaces: */
1084 	struct nsproxy			*nsproxy;
1085 
1086 	/* Signal handlers: */
1087 	struct signal_struct		*signal;
1088 	struct sighand_struct __rcu		*sighand;
1089 	sigset_t			blocked;
1090 	sigset_t			real_blocked;
1091 	/* Restored if set_restore_sigmask() was used: */
1092 	sigset_t			saved_sigmask;
1093 	struct sigpending		pending;
1094 	unsigned long			sas_ss_sp;
1095 	size_t				sas_ss_size;
1096 	unsigned int			sas_ss_flags;
1097 
1098 	struct callback_head		*task_works;
1099 
1100 #ifdef CONFIG_AUDIT
1101 #ifdef CONFIG_AUDITSYSCALL
1102 	struct audit_context		*audit_context;
1103 #endif
1104 	kuid_t				loginuid;
1105 	unsigned int			sessionid;
1106 #endif
1107 	struct seccomp			seccomp;
1108 	struct syscall_user_dispatch	syscall_dispatch;
1109 
1110 	/* Thread group tracking: */
1111 	u64				parent_exec_id;
1112 	u64				self_exec_id;
1113 
1114 	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
1115 	spinlock_t			alloc_lock;
1116 
1117 	/* Protection of the PI data structures: */
1118 	raw_spinlock_t			pi_lock;
1119 
1120 	struct wake_q_node		wake_q;
1121 
1122 #ifdef CONFIG_RT_MUTEXES
1123 	/* PI waiters blocked on a rt_mutex held by this task: */
1124 	struct rb_root_cached		pi_waiters;
1125 	/* Updated under owner's pi_lock and rq lock */
1126 	struct task_struct		*pi_top_task;
1127 	/* Deadlock detection and priority inheritance handling: */
1128 	struct rt_mutex_waiter		*pi_blocked_on;
1129 #endif
1130 
1131 #ifdef CONFIG_DEBUG_MUTEXES
1132 	/* Mutex deadlock detection: */
1133 	struct mutex_waiter		*blocked_on;
1134 #endif
1135 
1136 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1137 	int				non_block_count;
1138 #endif
1139 
1140 #ifdef CONFIG_TRACE_IRQFLAGS
1141 	struct irqtrace_events		irqtrace;
1142 	unsigned int			hardirq_threaded;
1143 	u64				hardirq_chain_key;
1144 	int				softirqs_enabled;
1145 	int				softirq_context;
1146 	int				irq_config;
1147 #endif
1148 #ifdef CONFIG_PREEMPT_RT
1149 	int				softirq_disable_cnt;
1150 #endif
1151 
1152 #ifdef CONFIG_LOCKDEP
1153 # define MAX_LOCK_DEPTH			48UL
1154 	u64				curr_chain_key;
1155 	int				lockdep_depth;
1156 	unsigned int			lockdep_recursion;
1157 	struct held_lock		held_locks[MAX_LOCK_DEPTH];
1158 #endif
1159 
1160 #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1161 	unsigned int			in_ubsan;
1162 #endif
1163 
1164 	/* Journalling filesystem info: */
1165 	void				*journal_info;
1166 
1167 	/* Stacked block device info: */
1168 	struct bio_list			*bio_list;
1169 
1170 	/* Stack plugging: */
1171 	struct blk_plug			*plug;
1172 
1173 	/* VM state: */
1174 	struct reclaim_state		*reclaim_state;
1175 
1176 	struct backing_dev_info		*backing_dev_info;
1177 
1178 	struct io_context		*io_context;
1179 
1180 #ifdef CONFIG_COMPACTION
1181 	struct capture_control		*capture_control;
1182 #endif
1183 	/* Ptrace state: */
1184 	unsigned long			ptrace_message;
1185 	kernel_siginfo_t		*last_siginfo;
1186 
1187 	struct task_io_accounting	ioac;
1188 #ifdef CONFIG_PSI
1189 	/* Pressure stall state */
1190 	unsigned int			psi_flags;
1191 #endif
1192 #ifdef CONFIG_TASK_XACCT
1193 	/* Accumulated RSS usage: */
1194 	u64				acct_rss_mem1;
1195 	/* Accumulated virtual memory usage: */
1196 	u64				acct_vm_mem1;
1197 	/* stime + utime since last update: */
1198 	u64				acct_timexpd;
1199 #endif
1200 #ifdef CONFIG_CPUSETS
1201 	/* Protected by ->alloc_lock: */
1202 	nodemask_t			mems_allowed;
1203 	/* Sequence number to catch updates: */
1204 	seqcount_spinlock_t		mems_allowed_seq;
1205 	int				cpuset_mem_spread_rotor;
1206 	int				cpuset_slab_spread_rotor;
1207 #endif
1208 #ifdef CONFIG_CGROUPS
1209 	/* Control Group info protected by css_set_lock: */
1210 	struct css_set __rcu		*cgroups;
1211 	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
1212 	struct list_head		cg_list;
1213 #endif
1214 #ifdef CONFIG_X86_CPU_RESCTRL
1215 	u32				closid;
1216 	u32				rmid;
1217 #endif
1218 #ifdef CONFIG_FUTEX
1219 	struct robust_list_head __user	*robust_list;
1220 #ifdef CONFIG_COMPAT
1221 	struct compat_robust_list_head __user *compat_robust_list;
1222 #endif
1223 	struct list_head		pi_state_list;
1224 	struct futex_pi_state		*pi_state_cache;
1225 	struct mutex			futex_exit_mutex;
1226 	unsigned int			futex_state;
1227 #endif
1228 #ifdef CONFIG_PERF_EVENTS
1229 	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
1230 	struct mutex			perf_event_mutex;
1231 	struct list_head		perf_event_list;
1232 #endif
1233 #ifdef CONFIG_DEBUG_PREEMPT
1234 	unsigned long			preempt_disable_ip;
1235 #endif
1236 #ifdef CONFIG_NUMA
1237 	/* Protected by alloc_lock: */
1238 	struct mempolicy		*mempolicy;
1239 	short				il_prev;
1240 	short				pref_node_fork;
1241 #endif
1242 #ifdef CONFIG_NUMA_BALANCING
1243 	int				numa_scan_seq;
1244 	unsigned int			numa_scan_period;
1245 	unsigned int			numa_scan_period_max;
1246 	int				numa_preferred_nid;
1247 	unsigned long			numa_migrate_retry;
1248 	/* Migration stamp: */
1249 	u64				node_stamp;
1250 	u64				last_task_numa_placement;
1251 	u64				last_sum_exec_runtime;
1252 	struct callback_head		numa_work;
1253 
1254 	/*
1255 	 * This pointer is only modified for current in syscall and
1256 	 * pagefault context (and for tasks being destroyed), so it can be read
1257 	 * from any of the following contexts:
1258 	 *  - RCU read-side critical section
1259 	 *  - current->numa_group from everywhere
1260 	 *  - task's runqueue locked, task not running
1261 	 */
1262 	struct numa_group __rcu		*numa_group;
1263 
1264 	/*
1265 	 * numa_faults is an array split into four regions:
1266 	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1267 	 * in this precise order.
1268 	 *
1269 	 * faults_memory: Exponential decaying average of faults on a per-node
1270 	 * basis. Scheduling placement decisions are made based on these
1271 	 * counts. The values remain static for the duration of a PTE scan.
1272 	 * faults_cpu: Track the nodes the process was running on when a NUMA
1273 	 * hinting fault was incurred.
1274 	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1275 	 * during the current scan window. When the scan completes, the counts
1276 	 * in faults_memory and faults_cpu decay and these values are copied.
1277 	 */
1278 	unsigned long			*numa_faults;
1279 	unsigned long			total_numa_faults;
1280 
1281 	/*
1282 	 * numa_faults_locality tracks if faults recorded during the last
1283 	 * scan window were remote/local or failed to migrate. The task scan
1284 	 * period is adapted based on the locality of the faults with different
1285 	 * weights depending on whether they were shared or private faults.
1286 	 */
1287 	unsigned long			numa_faults_locality[3];
1288 
1289 	unsigned long			numa_pages_migrated;
1290 #endif /* CONFIG_NUMA_BALANCING */
1291 
1292 #ifdef CONFIG_RSEQ
1293 	struct rseq __user *rseq;
1294 	u32 rseq_sig;
1295 	/*
1296 	 * RmW on rseq_event_mask must be performed atomically
1297 	 * with respect to preemption.
1298 	 */
1299 	unsigned long rseq_event_mask;
1300 #endif
1301 
1302 	struct tlbflush_unmap_batch	tlb_ubc;
1303 
1304 	union {
1305 		refcount_t		rcu_users;
1306 		struct rcu_head		rcu;
1307 	};
1308 
1309 	/* Cache last used pipe for splice(): */
1310 	struct pipe_inode_info		*splice_pipe;
1311 
1312 	struct page_frag		task_frag;
1313 
1314 #ifdef CONFIG_TASK_DELAY_ACCT
1315 	struct task_delay_info		*delays;
1316 #endif
1317 
1318 #ifdef CONFIG_FAULT_INJECTION
1319 	int				make_it_fail;
1320 	unsigned int			fail_nth;
1321 #endif
1322 	/*
1323 	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
1324 	 * balance_dirty_pages() for a dirty throttling pause:
1325 	 */
1326 	int				nr_dirtied;
1327 	int				nr_dirtied_pause;
1328 	/* Start of a write-and-pause period: */
1329 	unsigned long			dirty_paused_when;
1330 
1331 #ifdef CONFIG_LATENCYTOP
1332 	int				latency_record_count;
1333 	struct latency_record		latency_record[LT_SAVECOUNT];
1334 #endif
1335 	/*
1336 	 * Time slack values; these are used to round up poll() and
1337 	 * select() etc timeout values. These are in nanoseconds.
1338 	 */
1339 	u64				timer_slack_ns;
1340 	u64				default_timer_slack_ns;
1341 
1342 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
1343 	unsigned int			kasan_depth;
1344 #endif
1345 
1346 #ifdef CONFIG_KCSAN
1347 	struct kcsan_ctx		kcsan_ctx;
1348 #ifdef CONFIG_TRACE_IRQFLAGS
1349 	struct irqtrace_events		kcsan_save_irqtrace;
1350 #endif
1351 #ifdef CONFIG_KCSAN_WEAK_MEMORY
1352 	int				kcsan_stack_depth;
1353 #endif
1354 #endif
1355 
1356 #if IS_ENABLED(CONFIG_KUNIT)
1357 	struct kunit			*kunit_test;
1358 #endif
1359 
1360 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1361 	/* Index of current stored address in ret_stack: */
1362 	int				curr_ret_stack;
1363 	int				curr_ret_depth;
1364 
1365 	/* Stack of return addresses for return function tracing: */
1366 	struct ftrace_ret_stack		*ret_stack;
1367 
1368 	/* Timestamp for last schedule: */
1369 	unsigned long long		ftrace_timestamp;
1370 
1371 	/*
1372 	 * Number of functions that haven't been traced
1373 	 * because of depth overrun:
1374 	 */
1375 	atomic_t			trace_overrun;
1376 
1377 	/* Pause tracing: */
1378 	atomic_t			tracing_graph_pause;
1379 #endif
1380 
1381 #ifdef CONFIG_TRACING
1382 	/* State flags for use by tracers: */
1383 	unsigned long			trace;
1384 
1385 	/* Bitmask and counter of trace recursion: */
1386 	unsigned long			trace_recursion;
1387 #endif /* CONFIG_TRACING */
1388 
1389 #ifdef CONFIG_KCOV
1390 	/* See kernel/kcov.c for more details. */
1391 
1392 	/* Coverage collection mode enabled for this task (0 if disabled): */
1393 	unsigned int			kcov_mode;
1394 
1395 	/* Size of the kcov_area: */
1396 	unsigned int			kcov_size;
1397 
1398 	/* Buffer for coverage collection: */
1399 	void				*kcov_area;
1400 
1401 	/* KCOV descriptor wired with this task or NULL: */
1402 	struct kcov			*kcov;
1403 
1404 	/* KCOV common handle for remote coverage collection: */
1405 	u64				kcov_handle;
1406 
1407 	/* KCOV sequence number: */
1408 	int				kcov_sequence;
1409 
1410 	/* Collect coverage from softirq context: */
1411 	unsigned int			kcov_softirq;
1412 #endif
1413 
1414 #ifdef CONFIG_MEMCG
1415 	struct mem_cgroup		*memcg_in_oom;
1416 	gfp_t				memcg_oom_gfp_mask;
1417 	int				memcg_oom_order;
1418 
1419 	/* Number of pages to reclaim on returning to userland: */
1420 	unsigned int			memcg_nr_pages_over_high;
1421 
1422 	/* Used by memcontrol for targeted memcg charge: */
1423 	struct mem_cgroup		*active_memcg;
1424 #endif
1425 
1426 #ifdef CONFIG_BLK_CGROUP
1427 	struct request_queue		*throttle_queue;
1428 #endif
1429 
1430 #ifdef CONFIG_UPROBES
1431 	struct uprobe_task		*utask;
1432 #endif
1433 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1434 	unsigned int			sequential_io;
1435 	unsigned int			sequential_io_avg;
1436 #endif
1437 	struct kmap_ctrl		kmap_ctrl;
1438 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1439 	unsigned long			task_state_change;
1440 # ifdef CONFIG_PREEMPT_RT
1441 	unsigned long			saved_state_change;
1442 # endif
1443 #endif
1444 	int				pagefault_disabled;
1445 #ifdef CONFIG_MMU
1446 	struct task_struct		*oom_reaper_list;
1447 #endif
1448 #ifdef CONFIG_VMAP_STACK
1449 	struct vm_struct		*stack_vm_area;
1450 #endif
1451 #ifdef CONFIG_THREAD_INFO_IN_TASK
1452 	/* A live task holds one reference: */
1453 	refcount_t			stack_refcount;
1454 #endif
1455 #ifdef CONFIG_LIVEPATCH
1456 	int patch_state;
1457 #endif
1458 #ifdef CONFIG_SECURITY
1459 	/* Used by LSM modules for access restriction: */
1460 	void				*security;
1461 #endif
1462 #ifdef CONFIG_BPF_SYSCALL
1463 	/* Used by BPF task local storage */
1464 	struct bpf_local_storage __rcu	*bpf_storage;
1465 	/* Used for BPF run context */
1466 	struct bpf_run_ctx		*bpf_ctx;
1467 #endif
1468 
1469 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1470 	unsigned long			lowest_stack;
1471 	unsigned long			prev_lowest_stack;
1472 #endif
1473 
1474 #ifdef CONFIG_X86_MCE
1475 	void __user			*mce_vaddr;
1476 	__u64				mce_kflags;
1477 	u64				mce_addr;
1478 	__u64				mce_ripv : 1,
1479 					mce_whole_page : 1,
1480 					__mce_reserved : 62;
1481 	struct callback_head		mce_kill_me;
1482 	int				mce_count;
1483 #endif
1484 
1485 #ifdef CONFIG_KRETPROBES
1486 	struct llist_head               kretprobe_instances;
1487 #endif
1488 
1489 #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
1490 	/*
1491 	 * If L1D flush is supported on mm context switch
1492 	 * then we use this callback head to queue kill work
1493 	 * to kill tasks that are not running on SMT disabled
1494 	 * cores
1495 	 */
1496 	struct callback_head		l1d_flush_kill;
1497 #endif
1498 
1499 	/*
1500 	 * New fields for task_struct should be added above here, so that
1501 	 * they are included in the randomized portion of task_struct.
1502 	 */
1503 	randomized_struct_fields_end
1504 
1505 	/* CPU-specific state of this task: */
1506 	struct thread_struct		thread;
1507 
1508 	/*
1509 	 * WARNING: on x86, 'thread_struct' contains a variable-sized
1510 	 * structure.  It *MUST* be at the end of 'task_struct'.
1511 	 *
1512 	 * Do not put anything below here!
1513 	 */
1514 };
1515 
1516 static inline struct pid *task_pid(struct task_struct *task)
1517 {
1518 	return task->thread_pid;
1519 }
1520 
1521 /*
1522  * the helpers to get the task's different pids as they are seen
1523  * from various namespaces
1524  *
1525  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1526  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1527  *                     current.
1528  * task_xid_nr_ns()  : id seen from the ns specified;
1529  *
1530  * see also pid_nr() etc in include/linux/pid.h
1531  */
1532 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1533 
1534 static inline pid_t task_pid_nr(struct task_struct *tsk)
1535 {
1536 	return tsk->pid;
1537 }
1538 
1539 static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1540 {
1541 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1542 }
1543 
1544 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1545 {
1546 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1547 }
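
/*
 * Illustrative sketch of the _nr/_vnr distinction documented above, for a
 * hypothetical task pointer 'p' living in a child pid namespace:
 *
 *	pid_t global = task_pid_nr(p);	// id seen from the init namespace
 *	pid_t local  = task_pid_vnr(p);	// id seen from current's namespace
 *
 * The two generally differ once pid namespaces are nested.
 */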
1548 
1549 
1550 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1551 {
1552 	return tsk->tgid;
1553 }
1554 
1555 /**
1556  * pid_alive - check that a task structure is not stale
1557  * @p: Task structure to be checked.
1558  *
1559  * Test if a process is not yet dead (at most zombie state).
1560  * If pid_alive fails, then pointers within the task structure
1561  * can be stale and must not be dereferenced.
1562  *
1563  * Return: 1 if the process is alive. 0 otherwise.
1564  */
1565 static inline int pid_alive(const struct task_struct *p)
1566 {
1567 	return p->thread_pid != NULL;
1568 }
1569 
1570 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1571 {
1572 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1573 }
1574 
1575 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1576 {
1577 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1578 }
1579 
1580 
1581 static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1582 {
1583 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1584 }
1585 
1586 static inline pid_t task_session_vnr(struct task_struct *tsk)
1587 {
1588 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1589 }
1590 
1591 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1592 {
1593 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1594 }
1595 
1596 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1597 {
1598 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1599 }
1600 
1601 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1602 {
1603 	pid_t pid = 0;
1604 
1605 	rcu_read_lock();
1606 	if (pid_alive(tsk))
1607 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1608 	rcu_read_unlock();
1609 
1610 	return pid;
1611 }
1612 
1613 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1614 {
1615 	return task_ppid_nr_ns(tsk, &init_pid_ns);
1616 }
1617 
1618 /* Obsolete, do not use: */
1619 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1620 {
1621 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1622 }
1623 
1624 #define TASK_REPORT_IDLE	(TASK_REPORT + 1)
1625 #define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
1626 
1627 static inline unsigned int task_state_index(struct task_struct *tsk)
1628 {
1629 	unsigned int tsk_state = READ_ONCE(tsk->__state);
1630 	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1631 
1632 	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1633 
1634 	if (tsk_state == TASK_IDLE)
1635 		state = TASK_REPORT_IDLE;
1636 
1637 	return fls(state);
1638 }
1639 
1640 static inline char task_index_to_char(unsigned int state)
1641 {
1642 	static const char state_char[] = "RSDTtXZPI";
1643 
1644 	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1645 
1646 	return state_char[state];
1647 }
1648 
1649 static inline char task_state_to_char(struct task_struct *tsk)
1650 {
1651 	return task_index_to_char(task_state_index(tsk));
1652 }
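
/*
 * Examples of the resulting mapping, matching the single-letter codes shown
 * by ps(1) and /proc/<pid>/stat:
 *
 *	TASK_RUNNING         -> 'R'
 *	TASK_INTERRUPTIBLE   -> 'S'
 *	TASK_UNINTERRUPTIBLE -> 'D'
 *	__TASK_STOPPED       -> 'T'
 *	EXIT_ZOMBIE          -> 'Z'
 *	TASK_IDLE            -> 'I'
 */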
1653 
1654 /**
1655  * is_global_init - check if a task structure is init. Since init
1656  * is free to have sub-threads we need to check tgid.
1657  * @tsk: Task structure to be checked.
1658  *
1659  * Check if a task structure is the first user space task the kernel created.
1660  *
1661  * Return: 1 if the task structure is init. 0 otherwise.
1662  */
1663 static inline int is_global_init(struct task_struct *tsk)
1664 {
1665 	return task_tgid_nr(tsk) == 1;
1666 }
1667 
1668 extern struct pid *cad_pid;
1669 
1670 /*
1671  * Per process flags
1672  */
1673 #define PF_VCPU			0x00000001	/* I'm a virtual CPU */
1674 #define PF_IDLE			0x00000002	/* I am an IDLE thread */
1675 #define PF_EXITING		0x00000004	/* Getting shut down */
1676 #define PF_POSTCOREDUMP		0x00000008	/* Coredumps should ignore this task */
1677 #define PF_IO_WORKER		0x00000010	/* Task is an IO worker */
1678 #define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
1679 #define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
1680 #define PF_MCE_PROCESS		0x00000080      /* Process policy on mce errors */
1681 #define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
1682 #define PF_DUMPCORE		0x00000200	/* Dumped core */
1683 #define PF_SIGNALED		0x00000400	/* Killed by a signal */
1684 #define PF_MEMALLOC		0x00000800	/* Allocating memory */
1685 #define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
1686 #define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
1687 #define PF_USED_ASYNC		0x00004000	/* Used async_schedule*(), used by module init */
1688 #define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
1689 #define PF_FROZEN		0x00010000	/* Frozen for system suspend */
1690 #define PF_KSWAPD		0x00020000	/* I am kswapd */
1691 #define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
1692 #define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
1693 #define PF_LOCAL_THROTTLE	0x00100000	/* Throttle writes only against the bdi I write to,
1694 						 * I am cleaning dirty pages from some other bdi. */
1695 #define PF_KTHREAD		0x00200000	/* I am a kernel thread */
1696 #define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
1697 #define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
1698 #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_mask */
1699 #define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
1700 #define PF_MEMALLOC_PIN		0x10000000	/* Allocation context constrained to zones which allow long term pinning. */
1701 #define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
1702 #define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */
1703 
1704 /*
1705  * Only the _current_ task can read/write to tsk->flags, but other
1706  * tasks can access tsk->flags in readonly mode for example
1707  * with tsk_used_math (like during threaded core dumping).
1708  * There is however an exception to this rule during ptrace
1709  * or during fork: the ptracer task is allowed to write to the
1710  * child->flags of its traced child (same goes for fork, the parent
1711  * can write to the child->flags), because we're guaranteed the
1712  * child is not running and in turn not changing child->flags
1713  * at the same time the parent does it.
1714  */
1715 #define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
1716 #define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
1717 #define clear_used_math()			clear_stopped_child_used_math(current)
1718 #define set_used_math()				set_stopped_child_used_math(current)
1719 
1720 #define conditional_stopped_child_used_math(condition, child) \
1721 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1722 
1723 #define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)
1724 
1725 #define copy_to_stopped_child_used_math(child) \
1726 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1727 
1728 /* NOTE: this will return 0 or PF_USED_MATH; it will never return 1 */
1729 #define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
1730 #define used_math()				tsk_used_math(current)
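/*
 * Illustrative sketch (not part of the original header): how a fork/ptrace
 * path might propagate PF_USED_MATH to a stopped child using the helpers
 * above.  The function name is hypothetical.
 */
static inline bool example_child_needs_fpu_init(struct task_struct *child)
{
	/* The child is stopped, so writing child->flags here is safe. */
	copy_to_stopped_child_used_math(child);

	/* Remember: tsk_used_math() yields 0 or PF_USED_MATH, never 1. */
	return !tsk_used_math(child);
}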
1731 
1732 static __always_inline bool is_percpu_thread(void)
1733 {
1734 #ifdef CONFIG_SMP
1735 	return (current->flags & PF_NO_SETAFFINITY) &&
1736 		(current->nr_cpus_allowed == 1);
1737 #else
1738 	return true;
1739 #endif
1740 }
1741 
1742 /* Per-process atomic flags. */
1743 #define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
1744 #define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
1745 #define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
1746 #define PFA_SPEC_SSB_DISABLE		3	/* Speculative Store Bypass disabled */
1747 #define PFA_SPEC_SSB_FORCE_DISABLE	4	/* Speculative Store Bypass force disabled */
1748 #define PFA_SPEC_IB_DISABLE		5	/* Indirect branch speculation restricted */
1749 #define PFA_SPEC_IB_FORCE_DISABLE	6	/* Indirect branch speculation permanently restricted */
1750 #define PFA_SPEC_SSB_NOEXEC		7	/* Speculative Store Bypass clear on execve() */
1751 
1752 #define TASK_PFA_TEST(name, func)					\
1753 	static inline bool task_##func(struct task_struct *p)		\
1754 	{ return test_bit(PFA_##name, &p->atomic_flags); }
1755 
1756 #define TASK_PFA_SET(name, func)					\
1757 	static inline void task_set_##func(struct task_struct *p)	\
1758 	{ set_bit(PFA_##name, &p->atomic_flags); }
1759 
1760 #define TASK_PFA_CLEAR(name, func)					\
1761 	static inline void task_clear_##func(struct task_struct *p)	\
1762 	{ clear_bit(PFA_##name, &p->atomic_flags); }
1763 
1764 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1765 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1766 
1767 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1768 TASK_PFA_SET(SPREAD_PAGE, spread_page)
1769 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1770 
1771 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1772 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1773 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1774 
1775 TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1776 TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1777 TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1778 
1779 TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1780 TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1781 TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1782 
1783 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1784 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1785 
1786 TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1787 TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1788 TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1789 
1790 TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1791 TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
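/*
 * For reference, each TASK_PFA_TEST/SET/CLEAR line above expands to an
 * ordinary inline helper operating on p->atomic_flags; note that no CLEAR
 * helper is generated for NO_NEW_PRIVS or the *_FORCE_DISABLE flags, so
 * those are one-way.  Illustrative usage sketch (hypothetical name):
 */
static inline void example_enforce_no_new_privs(struct task_struct *p)
{
	if (!task_no_new_privs(p))
		task_set_no_new_privs(p);	/* cannot be cleared later */
}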
1792 
1793 static inline void
1794 current_restore_flags(unsigned long orig_flags, unsigned long flags)
1795 {
1796 	current->flags &= ~flags;
1797 	current->flags |= orig_flags & flags;
1798 }
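/*
 * Illustrative sketch (not part of the original header): the usual
 * save/modify/restore pattern for per-task PF_* flags, here forcing
 * GFP_NOFS behaviour around a region of allocations.  The function name is
 * hypothetical; the kernel's <linux/sched/mm.h> provides
 * memalloc_nofs_save()/memalloc_nofs_restore() wrappers for exactly this.
 */
static inline void example_do_nofs_work(void)
{
	unsigned int pflags = current->flags & PF_MEMALLOC_NOFS;

	current->flags |= PF_MEMALLOC_NOFS;
	/* ... allocations here implicitly behave as if GFP_NOFS ... */
	current_restore_flags(pflags, PF_MEMALLOC_NOFS);
}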
1799 
1800 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1801 extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
1802 #ifdef CONFIG_SMP
1803 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1804 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1805 extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
1806 extern void release_user_cpus_ptr(struct task_struct *p);
1807 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
1808 extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
1809 extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
1810 #else
1811 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1812 {
1813 }
1814 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1815 {
1816 	if (!cpumask_test_cpu(0, new_mask))
1817 		return -EINVAL;
1818 	return 0;
1819 }
1820 static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
1821 {
1822 	if (src->user_cpus_ptr)
1823 		return -EINVAL;
1824 	return 0;
1825 }
1826 static inline void release_user_cpus_ptr(struct task_struct *p)
1827 {
1828 	WARN_ON(p->user_cpus_ptr);
1829 }
1830 
1831 static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
1832 {
1833 	return 0;
1834 }
1835 #endif
1836 
1837 extern int yield_to(struct task_struct *p, bool preempt);
1838 extern void set_user_nice(struct task_struct *p, long nice);
1839 extern int task_prio(const struct task_struct *p);
1840 
1841 /**
1842  * task_nice - return the nice value of a given task.
1843  * @p: the task in question.
1844  *
1845  * Return: The nice value [ -20 ... 0 ... 19 ].
1846  */
1847 static inline int task_nice(const struct task_struct *p)
1848 {
1849 	return PRIO_TO_NICE((p)->static_prio);
1850 }
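/*
 * Worked example: PRIO_TO_NICE() subtracts DEFAULT_PRIO (120), so a
 * static_prio of 120 maps to nice 0, 100 maps to nice -20, and 139 maps to
 * nice 19.  Illustrative helper (hypothetical name):
 */
static inline bool example_task_is_niced(const struct task_struct *p)
{
	/* A positive nice value means the task has given up CPU priority. */
	return task_nice(p) > 0;
}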
1851 
1852 extern int can_nice(const struct task_struct *p, const int nice);
1853 extern int task_curr(const struct task_struct *p);
1854 extern int idle_cpu(int cpu);
1855 extern int available_idle_cpu(int cpu);
1856 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1857 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1858 extern void sched_set_fifo(struct task_struct *p);
1859 extern void sched_set_fifo_low(struct task_struct *p);
1860 extern void sched_set_normal(struct task_struct *p, int nice);
1861 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1862 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1863 extern struct task_struct *idle_task(int cpu);
1864 
1865 /**
1866  * is_idle_task - is the specified task an idle task?
1867  * @p: the task in question.
1868  *
1869  * Return: 1 if @p is an idle task. 0 otherwise.
1870  */
1871 static __always_inline bool is_idle_task(const struct task_struct *p)
1872 {
1873 	return !!(p->flags & PF_IDLE);
1874 }
1875 
1876 extern struct task_struct *curr_task(int cpu);
1877 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1878 
1879 void yield(void);
1880 
1881 union thread_union {
1882 #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1883 	struct task_struct task;
1884 #endif
1885 #ifndef CONFIG_THREAD_INFO_IN_TASK
1886 	struct thread_info thread_info;
1887 #endif
1888 	unsigned long stack[THREAD_SIZE/sizeof(long)];
1889 };
1890 
1891 #ifndef CONFIG_THREAD_INFO_IN_TASK
1892 extern struct thread_info init_thread_info;
1893 #endif
1894 
1895 extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1896 
1897 #ifdef CONFIG_THREAD_INFO_IN_TASK
1898 # define task_thread_info(task)	(&(task)->thread_info)
1899 #elif !defined(__HAVE_THREAD_FUNCTIONS)
1900 # define task_thread_info(task)	((struct thread_info *)(task)->stack)
1901 #endif
1902 
1903 /*
1904  * find a task by one of its numerical ids
1905  *
1906  * find_task_by_pid_ns():
1907  *      finds a task by its pid in the specified namespace
1908  * find_task_by_vpid():
1909  *      finds a task by its virtual pid
1910  *
1911  * see also find_vpid() etc in include/linux/pid.h
1912  */
1913 
1914 extern struct task_struct *find_task_by_vpid(pid_t nr);
1915 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1916 
1917 /*
1918  * find a task by its virtual pid and get the task struct
1919  */
1920 extern struct task_struct *find_get_task_by_vpid(pid_t nr);
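/*
 * Illustrative lookup sketch (not part of the original header):
 * find_task_by_vpid() returns an unreferenced pointer and must run under
 * rcu_read_lock(); taking a reference before dropping the RCU lock is
 * roughly what find_get_task_by_vpid() does for you.  get_task_struct()/
 * put_task_struct() come from <linux/sched/task.h>; the function name below
 * is hypothetical.
 */
static inline struct task_struct *example_lookup_task(pid_t nr)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(nr);
	if (p)
		get_task_struct(p);
	rcu_read_unlock();

	return p;	/* caller must put_task_struct(p) when done */
}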
1921 
1922 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1923 extern int wake_up_process(struct task_struct *tsk);
1924 extern void wake_up_new_task(struct task_struct *tsk);
1925 
1926 #ifdef CONFIG_SMP
1927 extern void kick_process(struct task_struct *tsk);
1928 #else
1929 static inline void kick_process(struct task_struct *tsk) { }
1930 #endif
1931 
1932 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1933 
1934 static inline void set_task_comm(struct task_struct *tsk, const char *from)
1935 {
1936 	__set_task_comm(tsk, from, false);
1937 }
1938 
1939 extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1940 #define get_task_comm(buf, tsk) ({			\
1941 	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
1942 	__get_task_comm(buf, sizeof(buf), tsk);		\
1943 })
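/*
 * Illustrative usage (not part of the original header): get_task_comm()
 * only accepts a real array of TASK_COMM_LEN bytes, otherwise the
 * BUILD_BUG_ON() above fires at compile time.  Assumes <linux/printk.h>;
 * the function name is hypothetical.
 */
static inline void example_print_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, tsk);
	pr_info("task %d comm '%s'\n", task_pid_nr(tsk), comm);
}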
1944 
1945 #ifdef CONFIG_SMP
1946 static __always_inline void scheduler_ipi(void)
1947 {
1948 	/*
1949 	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1950 	 * TIF_NEED_RESCHED remotely (for the first time) will also send
1951 	 * this IPI.
1952 	 */
1953 	preempt_fold_need_resched();
1954 }
1955 extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
1956 #else
1957 static inline void scheduler_ipi(void) { }
1958 static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
1959 {
1960 	return 1;
1961 }
1962 #endif
1963 
1964 /*
1965  * Set thread flags in other task's structures.
1966  * See asm/thread_info.h for TIF_xxxx flags available:
1967  */
1968 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1969 {
1970 	set_ti_thread_flag(task_thread_info(tsk), flag);
1971 }
1972 
1973 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1974 {
1975 	clear_ti_thread_flag(task_thread_info(tsk), flag);
1976 }
1977 
1978 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
1979 					  bool value)
1980 {
1981 	update_ti_thread_flag(task_thread_info(tsk), flag, value);
1982 }
1983 
1984 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1985 {
1986 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1987 }
1988 
1989 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1990 {
1991 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1992 }
1993 
1994 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1995 {
1996 	return test_ti_thread_flag(task_thread_info(tsk), flag);
1997 }
1998 
1999 static inline void set_tsk_need_resched(struct task_struct *tsk)
2000 {
2001 	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
2002 }
2003 
2004 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2005 {
2006 	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
2007 }
2008 
2009 static inline int test_tsk_need_resched(struct task_struct *tsk)
2010 {
2011 	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
2012 }
2013 
2014 /*
2015  * cond_resched() and cond_resched_lock(): latency reduction via
2016  * explicit rescheduling in places that are safe. The return
2017  * value indicates whether a reschedule was actually done.
2018  * cond_resched_lock() will drop the spinlock before scheduling.
2019  */
2020 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
2021 extern int __cond_resched(void);
2022 
2023 #ifdef CONFIG_PREEMPT_DYNAMIC
2024 
2025 DECLARE_STATIC_CALL(cond_resched, __cond_resched);
2026 
2027 static __always_inline int _cond_resched(void)
2028 {
2029 	return static_call_mod(cond_resched)();
2030 }
2031 
2032 #else
2033 
2034 static inline int _cond_resched(void)
2035 {
2036 	return __cond_resched();
2037 }
2038 
2039 #endif /* CONFIG_PREEMPT_DYNAMIC */
2040 
2041 #else
2042 
2043 static inline int _cond_resched(void) { return 0; }
2044 
2045 #endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */
2046 
2047 #define cond_resched() ({			\
2048 	__might_resched(__FILE__, __LINE__, 0);	\
2049 	_cond_resched();			\
2050 })
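/*
 * Illustrative usage (not part of the original header): long-running loops
 * in process context call cond_resched() periodically so that other tasks
 * can run even on non-preemptible kernels.  The work loop below is
 * hypothetical.
 */
static inline void example_process_many_items(unsigned long nr_items)
{
	unsigned long i;

	for (i = 0; i < nr_items; i++) {
		/* ... process item i ... */
		cond_resched();
	}
}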
2051 
2052 extern int __cond_resched_lock(spinlock_t *lock);
2053 extern int __cond_resched_rwlock_read(rwlock_t *lock);
2054 extern int __cond_resched_rwlock_write(rwlock_t *lock);
2055 
2056 #define MIGHT_RESCHED_RCU_SHIFT		8
2057 #define MIGHT_RESCHED_PREEMPT_MASK	((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
2058 
2059 #ifndef CONFIG_PREEMPT_RT
2060 /*
2061  * Non-RT kernels have an elevated preempt count due to the held lock,
2062  * but are not allowed to be inside an RCU read-side critical section.
2063  */
2064 # define PREEMPT_LOCK_RESCHED_OFFSETS	PREEMPT_LOCK_OFFSET
2065 #else
2066 /*
2067  * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
2068  * cond_resched*lock() has to take that into account because it checks for
2069  * preempt_count() and rcu_preempt_depth().
2070  */
2071 # define PREEMPT_LOCK_RESCHED_OFFSETS	\
2072 	(PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
2073 #endif
2074 
2075 #define cond_resched_lock(lock) ({						\
2076 	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
2077 	__cond_resched_lock(lock);						\
2078 })
2079 
2080 #define cond_resched_rwlock_read(lock) ({					\
2081 	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
2082 	__cond_resched_rwlock_read(lock);					\
2083 })
2084 
2085 #define cond_resched_rwlock_write(lock) ({					\
2086 	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
2087 	__cond_resched_rwlock_write(lock);					\
2088 })
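/*
 * Illustrative usage (not part of the original header): cond_resched_lock()
 * may drop and re-take the given spinlock around the reschedule, so it is
 * only safe where the protected state can tolerate the lock being released
 * mid-scan.  The lock, table and function name below are hypothetical, and
 * the table is assumed stable while the lock is dropped.
 */
static inline void example_scan_table(spinlock_t *lock, int *table, int n)
{
	int i;

	spin_lock(lock);
	for (i = 0; i < n; i++) {
		/* ... examine table[i] under the lock ... */
		cond_resched_lock(lock);	/* may briefly release 'lock' */
	}
	spin_unlock(lock);
}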
2089 
2090 static inline void cond_resched_rcu(void)
2091 {
2092 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2093 	rcu_read_unlock();
2094 	cond_resched();
2095 	rcu_read_lock();
2096 #endif
2097 }
2098 
2099 /*
2100  * Does a critical section need to be broken due to another
2101  * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
2102  * but a general need for low latency)
2103  */
2104 static inline int spin_needbreak(spinlock_t *lock)
2105 {
2106 #ifdef CONFIG_PREEMPTION
2107 	return spin_is_contended(lock);
2108 #else
2109 	return 0;
2110 #endif
2111 }
2112 
2113 /*
2114  * Check if a rwlock is contended.
2115  * Returns non-zero if there is another task waiting on the rwlock.
2116  * Returns zero if the lock is not contended or the system / underlying
2117  * rwlock implementation does not support contention detection.
2118  * Technically this does not depend on CONFIG_PREEMPTION, but reflects a
2119  * general need for low latency.
2120  */
2121 static inline int rwlock_needbreak(rwlock_t *lock)
2122 {
2123 #ifdef CONFIG_PREEMPTION
2124 	return rwlock_is_contended(lock);
2125 #else
2126 	return 0;
2127 #endif
2128 }
2129 
2130 static __always_inline bool need_resched(void)
2131 {
2132 	return unlikely(tif_need_resched());
2133 }
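/*
 * Illustrative lock-break sketch (not part of the original header): code
 * that loops while holding a spinlock often combines need_resched() and
 * spin_needbreak() to decide whether to drop the lock voluntarily.  The
 * function name is hypothetical.
 */
static inline bool example_should_break_lock(spinlock_t *lock)
{
	return need_resched() || spin_needbreak(lock);
}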
2134 
2135 /*
2136  * Wrappers for p->thread_info->cpu access. No-op on UP.
2137  */
2138 #ifdef CONFIG_SMP
2139 
2140 static inline unsigned int task_cpu(const struct task_struct *p)
2141 {
2142 	return READ_ONCE(task_thread_info(p)->cpu);
2143 }
2144 
2145 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2146 
2147 #else
2148 
2149 static inline unsigned int task_cpu(const struct task_struct *p)
2150 {
2151 	return 0;
2152 }
2153 
2154 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2155 {
2156 }
2157 
2158 #endif /* CONFIG_SMP */
2159 
2160 extern bool sched_task_on_rq(struct task_struct *p);
2161 extern unsigned long get_wchan(struct task_struct *p);
2162 
2163 /*
2164  * In order to reduce various lock holder preemption latencies, provide an
2165  * interface to see if a vCPU is currently running or not.
2166  *
2167  * This allows us to terminate optimistic spin loops and block, analogous to
2168  * the native optimistic spin heuristic of testing if the lock owner task is
2169  * running or not.
2170  */
2171 #ifndef vcpu_is_preempted
2172 static inline bool vcpu_is_preempted(int cpu)
2173 {
2174 	return false;
2175 }
2176 #endif
2177 
2178 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2179 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2180 
2181 #ifndef TASK_SIZE_OF
2182 #define TASK_SIZE_OF(tsk)	TASK_SIZE
2183 #endif
2184 
2185 #ifdef CONFIG_SMP
2186 static inline bool owner_on_cpu(struct task_struct *owner)
2187 {
2188 	/*
2189 	 * Due to the lock holder preemption issue, we skip spinning if the
2190 	 * task is not on a CPU or its CPU is preempted.
2191 	 */
2192 	return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
2193 }
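/*
 * Illustrative optimistic-spin sketch (not part of the original header):
 * a lock slow path may keep spinning only while the owner is running on a
 * CPU, and give up (to block instead) as soon as owner_on_cpu() says
 * otherwise.  Assumes the caller holds a reference (or RCU) keeping @owner
 * valid; the function name is hypothetical.
 */
static inline bool example_spin_on_owner(struct task_struct *owner)
{
	while (owner_on_cpu(owner)) {
		if (need_resched())
			return false;	/* stop spinning and sleep instead */
		cpu_relax();
	}
	return true;
}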
2194 
2195 /* Returns effective CPU energy utilization, as seen by the scheduler */
2196 unsigned long sched_cpu_util(int cpu, unsigned long max);
2197 #endif /* CONFIG_SMP */
2198 
2199 #ifdef CONFIG_RSEQ
2200 
2201 /*
2202  * Map the event mask on the user-space ABI enum rseq_cs_flags
2203  * for direct mask checks.
2204  */
2205 enum rseq_event_mask_bits {
2206 	RSEQ_EVENT_PREEMPT_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
2207 	RSEQ_EVENT_SIGNAL_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
2208 	RSEQ_EVENT_MIGRATE_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
2209 };
2210 
2211 enum rseq_event_mask {
2212 	RSEQ_EVENT_PREEMPT	= (1U << RSEQ_EVENT_PREEMPT_BIT),
2213 	RSEQ_EVENT_SIGNAL	= (1U << RSEQ_EVENT_SIGNAL_BIT),
2214 	RSEQ_EVENT_MIGRATE	= (1U << RSEQ_EVENT_MIGRATE_BIT),
2215 };
2216 
2217 static inline void rseq_set_notify_resume(struct task_struct *t)
2218 {
2219 	if (t->rseq)
2220 		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
2221 }
2222 
2223 void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
2224 
2225 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2226 					     struct pt_regs *regs)
2227 {
2228 	if (current->rseq)
2229 		__rseq_handle_notify_resume(ksig, regs);
2230 }
2231 
2232 static inline void rseq_signal_deliver(struct ksignal *ksig,
2233 				       struct pt_regs *regs)
2234 {
2235 	preempt_disable();
2236 	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
2237 	preempt_enable();
2238 	rseq_handle_notify_resume(ksig, regs);
2239 }
2240 
2241 /* rseq_preempt() requires preemption to be disabled. */
2242 static inline void rseq_preempt(struct task_struct *t)
2243 {
2244 	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
2245 	rseq_set_notify_resume(t);
2246 }
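/*
 * Illustrative call-site sketch (not part of the original header): the
 * scheduler invokes rseq_preempt() on the previous task from its
 * context-switch path, where preemption is already disabled, which is why
 * the plain (non-atomic) __set_bit() above is sufficient.  The function
 * name is hypothetical.
 */
static inline void example_context_switch_prepare(struct task_struct *prev)
{
	/* Preemption is disabled here, as rseq_preempt() requires. */
	rseq_preempt(prev);
}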
2247 
2248 /* rseq_migrate() requires preemption to be disabled. */
2249 static inline void rseq_migrate(struct task_struct *t)
2250 {
2251 	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
2252 	rseq_set_notify_resume(t);
2253 }
2254 
2255 /*
2256  * If the parent process has a registered restartable sequences area, the
2257  * child inherits it. Unregister rseq for a clone with CLONE_VM set.
2258  */
2259 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2260 {
2261 	if (clone_flags & CLONE_VM) {
2262 		t->rseq = NULL;
2263 		t->rseq_sig = 0;
2264 		t->rseq_event_mask = 0;
2265 	} else {
2266 		t->rseq = current->rseq;
2267 		t->rseq_sig = current->rseq_sig;
2268 		t->rseq_event_mask = current->rseq_event_mask;
2269 	}
2270 }
2271 
2272 static inline void rseq_execve(struct task_struct *t)
2273 {
2274 	t->rseq = NULL;
2275 	t->rseq_sig = 0;
2276 	t->rseq_event_mask = 0;
2277 }
2278 
2279 #else
2280 
2281 static inline void rseq_set_notify_resume(struct task_struct *t)
2282 {
2283 }
2284 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2285 					     struct pt_regs *regs)
2286 {
2287 }
2288 static inline void rseq_signal_deliver(struct ksignal *ksig,
2289 				       struct pt_regs *regs)
2290 {
2291 }
2292 static inline void rseq_preempt(struct task_struct *t)
2293 {
2294 }
2295 static inline void rseq_migrate(struct task_struct *t)
2296 {
2297 }
2298 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2299 {
2300 }
2301 static inline void rseq_execve(struct task_struct *t)
2302 {
2303 }
2304 
2305 #endif
2306 
2307 #ifdef CONFIG_DEBUG_RSEQ
2308 
2309 void rseq_syscall(struct pt_regs *regs);
2310 
2311 #else
2312 
2313 static inline void rseq_syscall(struct pt_regs *regs)
2314 {
2315 }
2316 
2317 #endif
2318 
2319 const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
2320 char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
2321 int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
2322 
2323 const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
2324 const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
2325 const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
2326 
2327 int sched_trace_rq_cpu(struct rq *rq);
2328 int sched_trace_rq_cpu_capacity(struct rq *rq);
2329 int sched_trace_rq_nr_running(struct rq *rq);
2330 
2331 const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
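/*
 * Illustrative sketch (not part of the original header): these accessors
 * are intended for tracing code (e.g. probes attached to the scheduler's
 * PELT tracepoints) that only receives opaque cfs_rq/rq pointers.  Assumes
 * <linux/printk.h>; the probe name is hypothetical.
 */
static inline void example_cfs_rq_probe(struct cfs_rq *cfs_rq)
{
	const struct sched_avg *avg = sched_trace_cfs_rq_avg(cfs_rq);

	if (avg)
		pr_debug("cfs_rq cpu%d: util_avg=%lu\n",
			 sched_trace_cfs_rq_cpu(cfs_rq), avg->util_avg);
}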
2332 
2333 #ifdef CONFIG_SCHED_CORE
2334 extern void sched_core_free(struct task_struct *tsk);
2335 extern void sched_core_fork(struct task_struct *p);
2336 extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
2337 				unsigned long uaddr);
2338 #else
2339 static inline void sched_core_free(struct task_struct *tsk) { }
2340 static inline void sched_core_fork(struct task_struct *p) { }
2341 #endif
2342 
2343 #endif
2344