xref: /linux/include/linux/sched.h (revision 8e396880a864b80381b3f402e36d9c428422315b)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_SCHED_H
3 #define _LINUX_SCHED_H
4 
5 /*
6  * Define 'struct task_struct' and provide the main scheduler
7  * APIs (schedule(), wakeup variants, etc.)
8  */
9 
10 #include <uapi/linux/sched.h>
11 
12 #include <asm/current.h>
13 
14 #include <linux/pid.h>
15 #include <linux/sem.h>
16 #include <linux/shm.h>
17 #include <linux/kmsan_types.h>
18 #include <linux/mutex.h>
19 #include <linux/plist.h>
20 #include <linux/hrtimer.h>
21 #include <linux/irqflags.h>
22 #include <linux/seccomp.h>
23 #include <linux/nodemask.h>
24 #include <linux/rcupdate.h>
25 #include <linux/refcount.h>
26 #include <linux/resource.h>
27 #include <linux/latencytop.h>
28 #include <linux/sched/prio.h>
29 #include <linux/sched/types.h>
30 #include <linux/signal_types.h>
31 #include <linux/syscall_user_dispatch.h>
32 #include <linux/mm_types_task.h>
33 #include <linux/task_io_accounting.h>
34 #include <linux/posix-timers.h>
35 #include <linux/rseq.h>
36 #include <linux/seqlock.h>
37 #include <linux/kcsan.h>
38 #include <linux/rv.h>
39 #include <linux/livepatch_sched.h>
40 #include <asm/kmap_size.h>
41 
42 /* task_struct member predeclarations (sorted alphabetically): */
43 struct audit_context;
44 struct backing_dev_info;
45 struct bio_list;
46 struct blk_plug;
47 struct bpf_local_storage;
48 struct bpf_run_ctx;
49 struct capture_control;
50 struct cfs_rq;
51 struct fs_struct;
52 struct futex_pi_state;
53 struct io_context;
54 struct io_uring_task;
55 struct mempolicy;
56 struct nameidata;
57 struct nsproxy;
58 struct perf_event_context;
59 struct pid_namespace;
60 struct pipe_inode_info;
61 struct rcu_node;
62 struct reclaim_state;
63 struct robust_list_head;
64 struct root_domain;
65 struct rq;
66 struct sched_attr;
67 struct sched_param;
68 struct seq_file;
69 struct sighand_struct;
70 struct signal_struct;
71 struct task_delay_info;
72 struct task_group;
73 struct user_event_mm;
74 
75 /*
76  * Task state bitmask. NOTE! These bits are also
77  * encoded in fs/proc/array.c: get_task_state().
78  *
79  * We have two separate sets of flags: task->__state
80  * is about runnability, while task->exit_state is
81  * about the task exiting. Confusing, but this way
82  * modifying one set can't modify the other one by
83  * mistake.
84  */
85 
86 /* Used in tsk->state: */
87 #define TASK_RUNNING			0x00000000
88 #define TASK_INTERRUPTIBLE		0x00000001
89 #define TASK_UNINTERRUPTIBLE		0x00000002
90 #define __TASK_STOPPED			0x00000004
91 #define __TASK_TRACED			0x00000008
92 /* Used in tsk->exit_state: */
93 #define EXIT_DEAD			0x00000010
94 #define EXIT_ZOMBIE			0x00000020
95 #define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
96 /* Used in tsk->state again: */
97 #define TASK_PARKED			0x00000040
98 #define TASK_DEAD			0x00000080
99 #define TASK_WAKEKILL			0x00000100
100 #define TASK_WAKING			0x00000200
101 #define TASK_NOLOAD			0x00000400
102 #define TASK_NEW			0x00000800
103 #define TASK_RTLOCK_WAIT		0x00001000
104 #define TASK_FREEZABLE			0x00002000
105 #define __TASK_FREEZABLE_UNSAFE	       (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
106 #define TASK_FROZEN			0x00008000
107 #define TASK_STATE_MAX			0x00010000
108 
109 #define TASK_ANY			(TASK_STATE_MAX-1)
110 
111 /*
112  * DO NOT ADD ANY NEW USERS !
113  */
114 #define TASK_FREEZABLE_UNSAFE		(TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)
115 
116 /* Convenience macros for the sake of set_current_state: */
117 #define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
118 #define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
119 #define TASK_TRACED			__TASK_TRACED
120 
121 #define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
122 
123 /* Convenience macros for the sake of wake_up(): */
124 #define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
125 
126 /* get_task_state(): */
127 #define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
128 					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
129 					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
130 					 TASK_PARKED)
131 
132 #define task_is_running(task)		(READ_ONCE((task)->__state) == TASK_RUNNING)
133 
134 #define task_is_traced(task)		((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
135 #define task_is_stopped(task)		((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
136 #define task_is_stopped_or_traced(task)	((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
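
/*
 * Example (an illustrative sketch, not part of this header): the helpers
 * above use READ_ONCE() on tsk->__state / tsk->jobctl, so they can be used
 * locklessly from reporting paths. task_is_blocked_for_report() below is a
 * hypothetical helper shown only to demonstrate the macros:
 *
 *	static bool task_is_blocked_for_report(struct task_struct *p)
 *	{
 *		return !task_is_running(p) && !task_is_stopped_or_traced(p);
 *	}
 */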
137 
138 /*
139  * Special states are those that do not use the normal wait-loop pattern. See
140  * the comment with set_special_state().
141  */
142 #define is_special_task_state(state)				\
143 	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
144 
145 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
146 # define debug_normal_state_change(state_value)				\
147 	do {								\
148 		WARN_ON_ONCE(is_special_task_state(state_value));	\
149 		current->task_state_change = _THIS_IP_;			\
150 	} while (0)
151 
152 # define debug_special_state_change(state_value)			\
153 	do {								\
154 		WARN_ON_ONCE(!is_special_task_state(state_value));	\
155 		current->task_state_change = _THIS_IP_;			\
156 	} while (0)
157 
158 # define debug_rtlock_wait_set_state()					\
159 	do {								 \
160 		current->saved_state_change = current->task_state_change;\
161 		current->task_state_change = _THIS_IP_;			 \
162 	} while (0)
163 
164 # define debug_rtlock_wait_restore_state()				\
165 	do {								 \
166 		current->task_state_change = current->saved_state_change;\
167 	} while (0)
168 
169 #else
170 # define debug_normal_state_change(cond)	do { } while (0)
171 # define debug_special_state_change(cond)	do { } while (0)
172 # define debug_rtlock_wait_set_state()		do { } while (0)
173 # define debug_rtlock_wait_restore_state()	do { } while (0)
174 #endif
175 
176 /*
177  * set_current_state() includes a barrier so that the write of current->state
178  * is correctly serialised wrt the caller's subsequent test of whether to
179  * actually sleep:
180  *
181  *   for (;;) {
182  *	set_current_state(TASK_UNINTERRUPTIBLE);
183  *	if (CONDITION)
184  *	   break;
185  *
186  *	schedule();
187  *   }
188  *   __set_current_state(TASK_RUNNING);
189  *
190  * If the caller does not need such serialisation (because, for instance, the
191  * CONDITION test and condition change and wakeup are under the same lock) then
192  * use __set_current_state().
193  *
194  * The above is typically ordered against the wakeup, which does:
195  *
196  *   CONDITION = 1;
197  *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
198  *
199  * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
200  * accessing p->state.
201  *
202  * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
203  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
204  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
205  *
206  * However, with slightly different timing the wakeup TASK_RUNNING store can
207  * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
208  * a problem either because that will result in one extra go around the loop
209  * and our @cond test will save the day.
210  *
211  * Also see the comments of try_to_wake_up().
212  */
213 #define __set_current_state(state_value)				\
214 	do {								\
215 		debug_normal_state_change((state_value));		\
216 		WRITE_ONCE(current->__state, (state_value));		\
217 	} while (0)
218 
219 #define set_current_state(state_value)					\
220 	do {								\
221 		debug_normal_state_change((state_value));		\
222 		smp_store_mb(current->__state, (state_value));		\
223 	} while (0)
224 
225 /*
226  * set_special_state() should be used for those states when the blocking task
227  * cannot use the regular condition-based wait-loop. In that case we must
228  * serialize against wakeups such that any possible in-flight TASK_RUNNING
229  * stores will not collide with our state change.
230  */
231 #define set_special_state(state_value)					\
232 	do {								\
233 		unsigned long flags; /* may shadow */			\
234 									\
235 		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
236 		debug_special_state_change((state_value));		\
237 		WRITE_ONCE(current->__state, (state_value));		\
238 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
239 	} while (0)
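
/*
 * Illustrative sketch of a special-state transition (simplified; not copied
 * verbatim from the kernel). A stop path such as do_signal_stop() cannot
 * re-check a condition in a loop, so it uses set_special_state() while
 * holding the relevant lock and only then blocks:
 *
 *	set_special_state(TASK_STOPPED);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	schedule();
 *
 * The siglock detail above is an assumption made for illustration only.
 */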
240 
241 /*
242  * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
243  *
244  * RT's spin/rwlock substitutions are state preserving. The state of the
245  * task when blocking on the lock is saved in task_struct::saved_state and
246  * restored after the lock has been acquired.  These operations are
247  * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
248  * lock related wakeups while the task is blocked on the lock are
249  * redirected to operate on task_struct::saved_state to ensure that these
250  * are not dropped. On restore task_struct::saved_state is set to
251  * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
252  *
253  * The lock operation looks like this:
254  *
255  *	current_save_and_set_rtlock_wait_state();
256  *	for (;;) {
257  *		if (try_lock())
258  *			break;
259  *		raw_spin_unlock_irq(&lock->wait_lock);
260  *		schedule_rtlock();
261  *		raw_spin_lock_irq(&lock->wait_lock);
262  *		set_current_state(TASK_RTLOCK_WAIT);
263  *	}
264  *	current_restore_rtlock_saved_state();
265  */
266 #define current_save_and_set_rtlock_wait_state()			\
267 	do {								\
268 		lockdep_assert_irqs_disabled();				\
269 		raw_spin_lock(&current->pi_lock);			\
270 		current->saved_state = current->__state;		\
271 		debug_rtlock_wait_set_state();				\
272 		WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);		\
273 		raw_spin_unlock(&current->pi_lock);			\
274 	} while (0);
275 
276 #define current_restore_rtlock_saved_state()				\
277 	do {								\
278 		lockdep_assert_irqs_disabled();				\
279 		raw_spin_lock(&current->pi_lock);			\
280 		debug_rtlock_wait_restore_state();			\
281 		WRITE_ONCE(current->__state, current->saved_state);	\
282 		current->saved_state = TASK_RUNNING;			\
283 		raw_spin_unlock(&current->pi_lock);			\
284 	} while (0);
285 
286 #define get_current_state()	READ_ONCE(current->__state)
287 
288 /*
289  * Define the task command name length as an enum so that it is
290  * visible to BPF programs.
291  */
292 enum {
293 	TASK_COMM_LEN = 16,
294 };
295 
296 extern void scheduler_tick(void);
297 
298 #define	MAX_SCHEDULE_TIMEOUT		LONG_MAX
299 
300 extern long schedule_timeout(long timeout);
301 extern long schedule_timeout_interruptible(long timeout);
302 extern long schedule_timeout_killable(long timeout);
303 extern long schedule_timeout_uninterruptible(long timeout);
304 extern long schedule_timeout_idle(long timeout);
305 asmlinkage void schedule(void);
306 extern void schedule_preempt_disabled(void);
307 asmlinkage void preempt_schedule_irq(void);
308 #ifdef CONFIG_PREEMPT_RT
309  extern void schedule_rtlock(void);
310 #endif
311 
312 extern int __must_check io_schedule_prepare(void);
313 extern void io_schedule_finish(int token);
314 extern long io_schedule_timeout(long timeout);
315 extern void io_schedule(void);
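
/*
 * Usage sketch for the timeout helpers above (illustrative only, assuming
 * msecs_to_jiffies() from <linux/jiffies.h> is available to the caller):
 *
 *	long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(1000));
 *
 * A non-zero 'remaining' means the task was woken before the timeout
 * expired. schedule_timeout_interruptible() and friends are shorthands
 * that set the task state before calling schedule_timeout().
 */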
316 
317 /**
318  * struct prev_cputime - snapshot of system and user cputime
319  * @utime: time spent in user mode
320  * @stime: time spent in system mode
321  * @lock: protects the above two fields
322  *
323  * Stores previous user/system time values such that we can guarantee
324  * monotonicity.
325  */
326 struct prev_cputime {
327 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
328 	u64				utime;
329 	u64				stime;
330 	raw_spinlock_t			lock;
331 #endif
332 };
333 
334 enum vtime_state {
335 	/* Task is sleeping or running in a CPU with VTIME inactive: */
336 	VTIME_INACTIVE = 0,
337 	/* Task is idle */
338 	VTIME_IDLE,
339 	/* Task runs in kernelspace in a CPU with VTIME active: */
340 	VTIME_SYS,
341 	/* Task runs in userspace in a CPU with VTIME active: */
342 	VTIME_USER,
343 	/* Task runs as a guest in a CPU with VTIME active: */
344 	VTIME_GUEST,
345 };
346 
347 struct vtime {
348 	seqcount_t		seqcount;
349 	unsigned long long	starttime;
350 	enum vtime_state	state;
351 	unsigned int		cpu;
352 	u64			utime;
353 	u64			stime;
354 	u64			gtime;
355 };
356 
357 /*
358  * Utilization clamp constraints.
359  * @UCLAMP_MIN:	Minimum utilization
360  * @UCLAMP_MAX:	Maximum utilization
361  * @UCLAMP_CNT:	Utilization clamp constraints count
362  */
363 enum uclamp_id {
364 	UCLAMP_MIN = 0,
365 	UCLAMP_MAX,
366 	UCLAMP_CNT
367 };
368 
369 #ifdef CONFIG_SMP
370 extern struct root_domain def_root_domain;
371 extern struct mutex sched_domains_mutex;
372 #endif
373 
374 struct sched_info {
375 #ifdef CONFIG_SCHED_INFO
376 	/* Cumulative counters: */
377 
378 	/* # of times we have run on this CPU: */
379 	unsigned long			pcount;
380 
381 	/* Time spent waiting on a runqueue: */
382 	unsigned long long		run_delay;
383 
384 	/* Timestamps: */
385 
386 	/* When did we last run on a CPU? */
387 	unsigned long long		last_arrival;
388 
389 	/* When were we last queued to run? */
390 	unsigned long long		last_queued;
391 
392 #endif /* CONFIG_SCHED_INFO */
393 };
394 
395 /*
396  * Integer metrics need fixed point arithmetic, e.g., sched/fair
397  * has a few: load, load_avg, util_avg, freq, and capacity.
398  *
399  * We define a basic fixed point arithmetic range, and then formalize
400  * all these metrics based on that basic range.
401  */
402 # define SCHED_FIXEDPOINT_SHIFT		10
403 # define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)
404 
405 /* Increase resolution of cpu_capacity calculations */
406 # define SCHED_CAPACITY_SHIFT		SCHED_FIXEDPOINT_SHIFT
407 # define SCHED_CAPACITY_SCALE		(1L << SCHED_CAPACITY_SHIFT)
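
/*
 * Worked example (added note, not in the original source): with
 * SCHED_FIXEDPOINT_SHIFT = 10, SCHED_FIXEDPOINT_SCALE = 1024, so a ratio of
 * 0.5 is stored as 0.5 * 1024 = 512, and full CPU capacity is represented
 * as SCHED_CAPACITY_SCALE = 1024.
 */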
408 
409 struct load_weight {
410 	unsigned long			weight;
411 	u32				inv_weight;
412 };
413 
414 /**
415  * struct util_est - Estimated utilization of FAIR tasks
416  * @enqueued: instantaneous estimated utilization of a task/cpu
417  * @ewma:     the Exponential Weighted Moving Average (EWMA)
418  *            utilization of a task
419  *
420  * Support data structure to track an Exponential Weighted Moving Average
421  * (EWMA) of a FAIR task's utilization. New samples are added to the moving
422  * average each time a task completes an activation. The sample weight is chosen
423  * so that the EWMA will be relatively insensitive to transient changes to the
424  * task's workload.
425  *
426  * The enqueued attribute has a slightly different meaning for tasks and cpus:
427  * - task:   the task's util_avg at last task dequeue time
428  * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
429  * Thus, the util_est.enqueued of a task represents the contribution on the
430  * estimated utilization of the CPU where that task is currently enqueued.
431  *
432  * Only for tasks do we track a moving average of the past instantaneous
433  * estimated utilization. This allows absorbing sporadic drops in the
434  * utilization of an otherwise almost periodic task.
435  *
436  * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
437  * updates. When a task is dequeued, its util_est should not be updated if its
438  * util_avg has not been updated in the meantime.
439  * This information is mapped into the MSB bit of util_est.enqueued at dequeue
440  * time. Since the max value of util_est.enqueued for a task is 1024 (the PELT
441  * util_avg cap for a task), it is safe to use the MSB.
442  */
443 struct util_est {
444 	unsigned int			enqueued;
445 	unsigned int			ewma;
446 #define UTIL_EST_WEIGHT_SHIFT		2
447 #define UTIL_AVG_UNCHANGED		0x80000000
448 } __attribute__((__aligned__(sizeof(u64))));
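
/*
 * With UTIL_EST_WEIGHT_SHIFT = 2 each new sample gets a weight of 1/4 in the
 * moving average. A sketch of the dequeue-time update (the real code lives
 * in kernel/sched/fair.c and handles additional corner cases):
 *
 *	ewma += (enqueued - ewma) >> UTIL_EST_WEIGHT_SHIFT;
 *
 * e.g. an ewma of 400 and a new sample of 800 move the average to 500.
 */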
449 
450 /*
451  * The load/runnable/util_avg accumulates an infinite geometric series
452  * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
453  *
454  * [load_avg definition]
455  *
456  *   load_avg = runnable% * scale_load_down(load)
457  *
458  * [runnable_avg definition]
459  *
460  *   runnable_avg = runnable% * SCHED_CAPACITY_SCALE
461  *
462  * [util_avg definition]
463  *
464  *   util_avg = running% * SCHED_CAPACITY_SCALE
465  *
466  * where runnable% is the time ratio that a sched_entity is runnable and
467  * running% the time ratio that a sched_entity is running.
468  *
469  * For cfs_rq, they are the aggregated values of all runnable and blocked
470  * sched_entities.
471  *
472  * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
473  * capacity scaling. The scaling is done through the rq_clock_pelt that is used
474  * for computing those signals (see update_rq_clock_pelt())
475  *
476  * N.B., the above ratios (runnable% and running%) themselves are in the
477  * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
478  * to as large a range as necessary. This is for example reflected by
479  * util_avg's SCHED_CAPACITY_SCALE.
480  *
481  * [Overflow issue]
482  *
483  * The 64-bit load_sum can accommodate 4353082796 (=2^64/47742/88761) entities
484  * with the highest load (=88761), always runnable on a single cfs_rq,
485  * and will not overflow, since that number already exceeds PID_MAX_LIMIT.
486  *
487  * For all other cases (including 32-bit kernels), struct load_weight's
488  * weight will overflow first before we do, because:
489  *
490  *    Max(load_avg) <= Max(load.weight)
491  *
492  * Then it is the load_weight's responsibility to consider overflow
493  * issues.
494  */
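
/*
 * Added note: in the bound above, 47742 is LOAD_AVG_MAX (the closed-form
 * maximum of the PELT geometric series) and 88761 is the load weight of a
 * nice -20 task. Checking the arithmetic: 2^64 is about 1.8e19, and
 * 1.8e19 / 47742 / 88761 is about 4.35e9 = 4353082796, far above
 * PID_MAX_LIMIT (4194304 on typical 64-bit configurations), so the bound
 * cannot be reached in practice.
 */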
495 struct sched_avg {
496 	u64				last_update_time;
497 	u64				load_sum;
498 	u64				runnable_sum;
499 	u32				util_sum;
500 	u32				period_contrib;
501 	unsigned long			load_avg;
502 	unsigned long			runnable_avg;
503 	unsigned long			util_avg;
504 	struct util_est			util_est;
505 } ____cacheline_aligned;
506 
507 struct sched_statistics {
508 #ifdef CONFIG_SCHEDSTATS
509 	u64				wait_start;
510 	u64				wait_max;
511 	u64				wait_count;
512 	u64				wait_sum;
513 	u64				iowait_count;
514 	u64				iowait_sum;
515 
516 	u64				sleep_start;
517 	u64				sleep_max;
518 	s64				sum_sleep_runtime;
519 
520 	u64				block_start;
521 	u64				block_max;
522 	s64				sum_block_runtime;
523 
524 	u64				exec_max;
525 	u64				slice_max;
526 
527 	u64				nr_migrations_cold;
528 	u64				nr_failed_migrations_affine;
529 	u64				nr_failed_migrations_running;
530 	u64				nr_failed_migrations_hot;
531 	u64				nr_forced_migrations;
532 
533 	u64				nr_wakeups;
534 	u64				nr_wakeups_sync;
535 	u64				nr_wakeups_migrate;
536 	u64				nr_wakeups_local;
537 	u64				nr_wakeups_remote;
538 	u64				nr_wakeups_affine;
539 	u64				nr_wakeups_affine_attempts;
540 	u64				nr_wakeups_passive;
541 	u64				nr_wakeups_idle;
542 
543 #ifdef CONFIG_SCHED_CORE
544 	u64				core_forceidle_sum;
545 #endif
546 #endif /* CONFIG_SCHEDSTATS */
547 } ____cacheline_aligned;
548 
549 struct sched_entity {
550 	/* For load-balancing: */
551 	struct load_weight		load;
552 	struct rb_node			run_node;
553 	struct list_head		group_node;
554 	unsigned int			on_rq;
555 
556 	u64				exec_start;
557 	u64				sum_exec_runtime;
558 	u64				vruntime;
559 	u64				prev_sum_exec_runtime;
560 
561 	u64				nr_migrations;
562 
563 #ifdef CONFIG_FAIR_GROUP_SCHED
564 	int				depth;
565 	struct sched_entity		*parent;
566 	/* rq on which this entity is (to be) queued: */
567 	struct cfs_rq			*cfs_rq;
568 	/* rq "owned" by this entity/group: */
569 	struct cfs_rq			*my_q;
570 	/* cached value of my_q->h_nr_running */
571 	unsigned long			runnable_weight;
572 #endif
573 
574 #ifdef CONFIG_SMP
575 	/*
576 	 * Per entity load average tracking.
577 	 *
578 	 * Put into separate cache line so it does not
579 	 * collide with read-mostly values above.
580 	 */
581 	struct sched_avg		avg;
582 #endif
583 };
584 
585 struct sched_rt_entity {
586 	struct list_head		run_list;
587 	unsigned long			timeout;
588 	unsigned long			watchdog_stamp;
589 	unsigned int			time_slice;
590 	unsigned short			on_rq;
591 	unsigned short			on_list;
592 
593 	struct sched_rt_entity		*back;
594 #ifdef CONFIG_RT_GROUP_SCHED
595 	struct sched_rt_entity		*parent;
596 	/* rq on which this entity is (to be) queued: */
597 	struct rt_rq			*rt_rq;
598 	/* rq "owned" by this entity/group: */
599 	struct rt_rq			*my_q;
600 #endif
601 } __randomize_layout;
602 
603 struct sched_dl_entity {
604 	struct rb_node			rb_node;
605 
606 	/*
607 	 * Original scheduling parameters. Copied here from sched_attr
608 	 * during sched_setattr(), they will remain the same until
609 	 * the next sched_setattr().
610 	 */
611 	u64				dl_runtime;	/* Maximum runtime for each instance	*/
612 	u64				dl_deadline;	/* Relative deadline of each instance	*/
613 	u64				dl_period;	/* Separation of two instances (period) */
614 	u64				dl_bw;		/* dl_runtime / dl_period		*/
615 	u64				dl_density;	/* dl_runtime / dl_deadline		*/
616 
617 	/*
618 	 * Actual scheduling parameters. Initialized with the values above,
619 	 * they are continuously updated during task execution. Note that
620 	 * the remaining runtime could be < 0 in case we are in overrun.
621 	 */
622 	s64				runtime;	/* Remaining runtime for this instance	*/
623 	u64				deadline;	/* Absolute deadline for this instance	*/
624 	unsigned int			flags;		/* Specifying the scheduler behaviour	*/
625 
626 	/*
627 	 * Some bool flags:
628 	 *
629 	 * @dl_throttled tells if we exhausted the runtime. If so, the
630 	 * task has to wait for a replenishment to be performed at the
631 	 * next firing of dl_timer.
632 	 *
633 	 * @dl_yielded tells if task gave up the CPU before consuming
634 	 * all its available runtime during the last job.
635 	 *
636 	 * @dl_non_contending tells if the task is inactive while still
637 	 * contributing to the active utilization. In other words, it
638 	 * indicates if the inactive timer has been armed and its handler
639 	 * has not been executed yet. This flag is useful to avoid race
640 	 * conditions between the inactive timer handler and the wakeup
641 	 * code.
642 	 *
643 	 * @dl_overrun tells if the task asked to be informed about runtime
644 	 * overruns.
645 	 */
646 	unsigned int			dl_throttled      : 1;
647 	unsigned int			dl_yielded        : 1;
648 	unsigned int			dl_non_contending : 1;
649 	unsigned int			dl_overrun	  : 1;
650 
651 	/*
652 	 * Bandwidth enforcement timer. Each -deadline task has its
653 	 * own bandwidth to be enforced, thus we need one timer per task.
654 	 */
655 	struct hrtimer			dl_timer;
656 
657 	/*
658 	 * Inactive timer, responsible for decreasing the active utilization
659 	 * at the "0-lag time". When a -deadline task blocks, it contributes
660 	 * to GRUB's active utilization until the "0-lag time", hence a
661 	 * timer is needed to decrease the active utilization at the correct
662 	 * time.
663 	 */
664 	struct hrtimer inactive_timer;
665 
666 #ifdef CONFIG_RT_MUTEXES
667 	/*
668 	 * Priority Inheritance. When a DEADLINE scheduling entity is boosted
669 	 * pi_se points to the donor, otherwise points to the dl_se it belongs
670 	 * to (the original one/itself).
671 	 */
672 	struct sched_dl_entity *pi_se;
673 #endif
674 };
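
/*
 * Illustrative user-space sketch (not part of this header) of how the
 * "original" parameters above get populated: sched_setattr(2) copies
 * sched_runtime/sched_deadline/sched_period from struct sched_attr.
 * Values chosen for illustration only (runtime 10ms, deadline 30ms,
 * period 100ms, all in nanoseconds); glibc provides no wrapper, so the
 * raw syscall is used:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,
 *		.sched_deadline	= 30 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *
 *	syscall(SYS_sched_setattr, 0, &attr, 0);
 */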
675 
676 #ifdef CONFIG_UCLAMP_TASK
677 /* Number of utilization clamp buckets (shorter alias) */
678 #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
679 
680 /*
681  * Utilization clamp for a scheduling entity
682  * @value:		clamp value "assigned" to a se
683  * @bucket_id:		bucket index corresponding to the "assigned" value
684  * @active:		the se is currently refcounted in a rq's bucket
685  * @user_defined:	the requested clamp value comes from user-space
686  *
687  * The bucket_id is the index of the clamp bucket matching the clamp value
688  * which is pre-computed and stored to avoid expensive integer divisions from
689  * the fast path.
690  *
691  * The active bit is set whenever a task has got an "effective" value assigned,
692  * which can be different from the clamp value "requested" from user-space.
693  * This makes it possible to know that a task is refcounted in the rq's
694  * bucket corresponding to the "effective" bucket_id.
695  *
696  * The user_defined bit is set whenever a task has got a task-specific clamp
697  * value requested from userspace, i.e. the system defaults apply to this task
698  * just as a restriction. This allows default clamps to be relaxed when a less
699  * restrictive task-specific value has been requested, thus implementing a
700  * "nice" semantic. For example, a task running with a 20%
701  * default boost can still drop its own boosting to 0%.
702  */
703 struct uclamp_se {
704 	unsigned int value		: bits_per(SCHED_CAPACITY_SCALE);
705 	unsigned int bucket_id		: bits_per(UCLAMP_BUCKETS);
706 	unsigned int active		: 1;
707 	unsigned int user_defined	: 1;
708 };
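
/*
 * Added sizing note: with SCHED_CAPACITY_SCALE = 1024, bits_per() gives the
 * value field 11 bits (enough for 0..1024), and with the default
 * CONFIG_UCLAMP_BUCKETS_COUNT (5) the bucket_id field needs only 3 bits,
 * so under the default configuration the whole struct fits in a single
 * unsigned int.
 */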
709 #endif /* CONFIG_UCLAMP_TASK */
710 
711 union rcu_special {
712 	struct {
713 		u8			blocked;
714 		u8			need_qs;
715 		u8			exp_hint; /* Hint for performance. */
716 		u8			need_mb; /* Readers need smp_mb(). */
717 	} b; /* Bits. */
718 	u32 s; /* Set of bits. */
719 };
720 
721 enum perf_event_task_context {
722 	perf_invalid_context = -1,
723 	perf_hw_context = 0,
724 	perf_sw_context,
725 	perf_nr_task_contexts,
726 };
727 
728 struct wake_q_node {
729 	struct wake_q_node *next;
730 };
731 
732 struct kmap_ctrl {
733 #ifdef CONFIG_KMAP_LOCAL
734 	int				idx;
735 	pte_t				pteval[KM_MAX_IDX];
736 #endif
737 };
738 
739 struct task_struct {
740 #ifdef CONFIG_THREAD_INFO_IN_TASK
741 	/*
742 	 * For reasons of header soup (see current_thread_info()), this
743 	 * must be the first element of task_struct.
744 	 */
745 	struct thread_info		thread_info;
746 #endif
747 	unsigned int			__state;
748 
749 #ifdef CONFIG_PREEMPT_RT
750 	/* saved state for "spinlock sleepers" */
751 	unsigned int			saved_state;
752 #endif
753 
754 	/*
755 	 * This begins the randomizable portion of task_struct. Only
756 	 * scheduling-critical items should be added above here.
757 	 */
758 	randomized_struct_fields_start
759 
760 	void				*stack;
761 	refcount_t			usage;
762 	/* Per task flags (PF_*), defined further below: */
763 	unsigned int			flags;
764 	unsigned int			ptrace;
765 
766 #ifdef CONFIG_SMP
767 	int				on_cpu;
768 	struct __call_single_node	wake_entry;
769 	unsigned int			wakee_flips;
770 	unsigned long			wakee_flip_decay_ts;
771 	struct task_struct		*last_wakee;
772 
773 	/*
774 	 * recent_used_cpu is initially set as the last CPU used by a task
775 	 * that wakes affine another task. Waker/wakee relationships can
776 	 * push tasks around a CPU where each wakeup moves to the next one.
777 	 * Tracking a recently used CPU allows a quick search for a recently
778 	 * used CPU that may be idle.
779 	 */
780 	int				recent_used_cpu;
781 	int				wake_cpu;
782 #endif
783 	int				on_rq;
784 
785 	int				prio;
786 	int				static_prio;
787 	int				normal_prio;
788 	unsigned int			rt_priority;
789 
790 	struct sched_entity		se;
791 	struct sched_rt_entity		rt;
792 	struct sched_dl_entity		dl;
793 	const struct sched_class	*sched_class;
794 
795 #ifdef CONFIG_SCHED_CORE
796 	struct rb_node			core_node;
797 	unsigned long			core_cookie;
798 	unsigned int			core_occupation;
799 #endif
800 
801 #ifdef CONFIG_CGROUP_SCHED
802 	struct task_group		*sched_task_group;
803 #endif
804 
805 #ifdef CONFIG_UCLAMP_TASK
806 	/*
807 	 * Clamp values requested for a scheduling entity.
808 	 * Must be updated with task_rq_lock() held.
809 	 */
810 	struct uclamp_se		uclamp_req[UCLAMP_CNT];
811 	/*
812 	 * Effective clamp values used for a scheduling entity.
813 	 * Must be updated with task_rq_lock() held.
814 	 */
815 	struct uclamp_se		uclamp[UCLAMP_CNT];
816 #endif
817 
818 	struct sched_statistics         stats;
819 
820 #ifdef CONFIG_PREEMPT_NOTIFIERS
821 	/* List of struct preempt_notifier: */
822 	struct hlist_head		preempt_notifiers;
823 #endif
824 
825 #ifdef CONFIG_BLK_DEV_IO_TRACE
826 	unsigned int			btrace_seq;
827 #endif
828 
829 	unsigned int			policy;
830 	int				nr_cpus_allowed;
831 	const cpumask_t			*cpus_ptr;
832 	cpumask_t			*user_cpus_ptr;
833 	cpumask_t			cpus_mask;
834 	void				*migration_pending;
835 #ifdef CONFIG_SMP
836 	unsigned short			migration_disabled;
837 #endif
838 	unsigned short			migration_flags;
839 
840 #ifdef CONFIG_PREEMPT_RCU
841 	int				rcu_read_lock_nesting;
842 	union rcu_special		rcu_read_unlock_special;
843 	struct list_head		rcu_node_entry;
844 	struct rcu_node			*rcu_blocked_node;
845 #endif /* #ifdef CONFIG_PREEMPT_RCU */
846 
847 #ifdef CONFIG_TASKS_RCU
848 	unsigned long			rcu_tasks_nvcsw;
849 	u8				rcu_tasks_holdout;
850 	u8				rcu_tasks_idx;
851 	int				rcu_tasks_idle_cpu;
852 	struct list_head		rcu_tasks_holdout_list;
853 #endif /* #ifdef CONFIG_TASKS_RCU */
854 
855 #ifdef CONFIG_TASKS_TRACE_RCU
856 	int				trc_reader_nesting;
857 	int				trc_ipi_to_cpu;
858 	union rcu_special		trc_reader_special;
859 	struct list_head		trc_holdout_list;
860 	struct list_head		trc_blkd_node;
861 	int				trc_blkd_cpu;
862 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
863 
864 	struct sched_info		sched_info;
865 
866 	struct list_head		tasks;
867 #ifdef CONFIG_SMP
868 	struct plist_node		pushable_tasks;
869 	struct rb_node			pushable_dl_tasks;
870 #endif
871 
872 	struct mm_struct		*mm;
873 	struct mm_struct		*active_mm;
874 
875 	int				exit_state;
876 	int				exit_code;
877 	int				exit_signal;
878 	/* The signal sent when the parent dies: */
879 	int				pdeath_signal;
880 	/* JOBCTL_*, siglock protected: */
881 	unsigned long			jobctl;
882 
883 	/* Used for emulating ABI behavior of previous Linux versions: */
884 	unsigned int			personality;
885 
886 	/* Scheduler bits, serialized by scheduler locks: */
887 	unsigned			sched_reset_on_fork:1;
888 	unsigned			sched_contributes_to_load:1;
889 	unsigned			sched_migrated:1;
890 
891 	/* Force alignment to the next boundary: */
892 	unsigned			:0;
893 
894 	/* Unserialized, strictly 'current' */
895 
896 	/*
897 	 * This field must not be in the scheduler word above due to wakelist
898 	 * queueing no longer being serialized by p->on_cpu. However:
899 	 *
900 	 * p->XXX = X;			ttwu()
901 	 * schedule()			  if (p->on_rq && ..) // false
902 	 *   smp_mb__after_spinlock();	  if (smp_load_acquire(&p->on_cpu) && //true
903 	 *   deactivate_task()		      ttwu_queue_wakelist())
904 	 *     p->on_rq = 0;			p->sched_remote_wakeup = Y;
905 	 *
906 	 * guarantees all stores of 'current' are visible before
907 	 * ->sched_remote_wakeup gets used, so it can be in this word.
908 	 */
909 	unsigned			sched_remote_wakeup:1;
910 
911 	/* Bit to tell LSMs we're in execve(): */
912 	unsigned			in_execve:1;
913 	unsigned			in_iowait:1;
914 #ifndef TIF_RESTORE_SIGMASK
915 	unsigned			restore_sigmask:1;
916 #endif
917 #ifdef CONFIG_MEMCG
918 	unsigned			in_user_fault:1;
919 #endif
920 #ifdef CONFIG_LRU_GEN
921 	/* whether the LRU algorithm may apply to this access */
922 	unsigned			in_lru_fault:1;
923 #endif
924 #ifdef CONFIG_COMPAT_BRK
925 	unsigned			brk_randomized:1;
926 #endif
927 #ifdef CONFIG_CGROUPS
928 	/* disallow userland-initiated cgroup migration */
929 	unsigned			no_cgroup_migration:1;
930 	/* task is frozen/stopped (used by the cgroup freezer) */
931 	unsigned			frozen:1;
932 #endif
933 #ifdef CONFIG_BLK_CGROUP
934 	unsigned			use_memdelay:1;
935 #endif
936 #ifdef CONFIG_PSI
937 	/* Stalled due to lack of memory */
938 	unsigned			in_memstall:1;
939 #endif
940 #ifdef CONFIG_PAGE_OWNER
941 	/* Used by page_owner=on to detect recursion in page tracking. */
942 	unsigned			in_page_owner:1;
943 #endif
944 #ifdef CONFIG_EVENTFD
945 	/* Recursion prevention for eventfd_signal() */
946 	unsigned			in_eventfd:1;
947 #endif
948 #ifdef CONFIG_IOMMU_SVA
949 	unsigned			pasid_activated:1;
950 #endif
951 #ifdef	CONFIG_CPU_SUP_INTEL
952 	unsigned			reported_split_lock:1;
953 #endif
954 #ifdef CONFIG_TASK_DELAY_ACCT
955 	/* delay due to memory thrashing */
956 	unsigned                        in_thrashing:1;
957 #endif
958 
959 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
960 
961 	struct restart_block		restart_block;
962 
963 	pid_t				pid;
964 	pid_t				tgid;
965 
966 #ifdef CONFIG_STACKPROTECTOR
967 	/* Canary value for the -fstack-protector GCC feature: */
968 	unsigned long			stack_canary;
969 #endif
970 	/*
971 	 * Pointers to the (original) parent process, youngest child, younger sibling,
972 	 * older sibling, respectively.  (p->father can be replaced with
973 	 * p->real_parent->pid)
974 	 */
975 
976 	/* Real parent process: */
977 	struct task_struct __rcu	*real_parent;
978 
979 	/* Recipient of SIGCHLD, wait4() reports: */
980 	struct task_struct __rcu	*parent;
981 
982 	/*
983 	 * Children/sibling form the list of natural children:
984 	 */
985 	struct list_head		children;
986 	struct list_head		sibling;
987 	struct task_struct		*group_leader;
988 
989 	/*
990 	 * 'ptraced' is the list of tasks this task is using ptrace() on.
991 	 *
992 	 * This includes both natural children and PTRACE_ATTACH targets.
993 	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
994 	 */
995 	struct list_head		ptraced;
996 	struct list_head		ptrace_entry;
997 
998 	/* PID/PID hash table linkage. */
999 	struct pid			*thread_pid;
1000 	struct hlist_node		pid_links[PIDTYPE_MAX];
1001 	struct list_head		thread_group;
1002 	struct list_head		thread_node;
1003 
1004 	struct completion		*vfork_done;
1005 
1006 	/* CLONE_CHILD_SETTID: */
1007 	int __user			*set_child_tid;
1008 
1009 	/* CLONE_CHILD_CLEARTID: */
1010 	int __user			*clear_child_tid;
1011 
1012 	/* PF_KTHREAD | PF_IO_WORKER */
1013 	void				*worker_private;
1014 
1015 	u64				utime;
1016 	u64				stime;
1017 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1018 	u64				utimescaled;
1019 	u64				stimescaled;
1020 #endif
1021 	u64				gtime;
1022 	struct prev_cputime		prev_cputime;
1023 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1024 	struct vtime			vtime;
1025 #endif
1026 
1027 #ifdef CONFIG_NO_HZ_FULL
1028 	atomic_t			tick_dep_mask;
1029 #endif
1030 	/* Context switch counts: */
1031 	unsigned long			nvcsw;
1032 	unsigned long			nivcsw;
1033 
1034 	/* Monotonic time in nsecs: */
1035 	u64				start_time;
1036 
1037 	/* Boot based time in nsecs: */
1038 	u64				start_boottime;
1039 
1040 	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
1041 	unsigned long			min_flt;
1042 	unsigned long			maj_flt;
1043 
1044 	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
1045 	struct posix_cputimers		posix_cputimers;
1046 
1047 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1048 	struct posix_cputimers_work	posix_cputimers_work;
1049 #endif
1050 
1051 	/* Process credentials: */
1052 
1053 	/* Tracer's credentials at attach: */
1054 	const struct cred __rcu		*ptracer_cred;
1055 
1056 	/* Objective and real subjective task credentials (COW): */
1057 	const struct cred __rcu		*real_cred;
1058 
1059 	/* Effective (overridable) subjective task credentials (COW): */
1060 	const struct cred __rcu		*cred;
1061 
1062 #ifdef CONFIG_KEYS
1063 	/* Cached requested key. */
1064 	struct key			*cached_requested_key;
1065 #endif
1066 
1067 	/*
1068 	 * executable name, excluding path.
1069 	 *
1070 	 * - normally initialized by setup_new_exec()
1071 	 * - access it with [gs]et_task_comm()
1072 	 * - lock it with task_lock()
1073 	 */
1074 	char				comm[TASK_COMM_LEN];
1075 
1076 	struct nameidata		*nameidata;
1077 
1078 #ifdef CONFIG_SYSVIPC
1079 	struct sysv_sem			sysvsem;
1080 	struct sysv_shm			sysvshm;
1081 #endif
1082 #ifdef CONFIG_DETECT_HUNG_TASK
1083 	unsigned long			last_switch_count;
1084 	unsigned long			last_switch_time;
1085 #endif
1086 	/* Filesystem information: */
1087 	struct fs_struct		*fs;
1088 
1089 	/* Open file information: */
1090 	struct files_struct		*files;
1091 
1092 #ifdef CONFIG_IO_URING
1093 	struct io_uring_task		*io_uring;
1094 #endif
1095 
1096 	/* Namespaces: */
1097 	struct nsproxy			*nsproxy;
1098 
1099 	/* Signal handlers: */
1100 	struct signal_struct		*signal;
1101 	struct sighand_struct __rcu		*sighand;
1102 	sigset_t			blocked;
1103 	sigset_t			real_blocked;
1104 	/* Restored if set_restore_sigmask() was used: */
1105 	sigset_t			saved_sigmask;
1106 	struct sigpending		pending;
1107 	unsigned long			sas_ss_sp;
1108 	size_t				sas_ss_size;
1109 	unsigned int			sas_ss_flags;
1110 
1111 	struct callback_head		*task_works;
1112 
1113 #ifdef CONFIG_AUDIT
1114 #ifdef CONFIG_AUDITSYSCALL
1115 	struct audit_context		*audit_context;
1116 #endif
1117 	kuid_t				loginuid;
1118 	unsigned int			sessionid;
1119 #endif
1120 	struct seccomp			seccomp;
1121 	struct syscall_user_dispatch	syscall_dispatch;
1122 
1123 	/* Thread group tracking: */
1124 	u64				parent_exec_id;
1125 	u64				self_exec_id;
1126 
1127 	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
1128 	spinlock_t			alloc_lock;
1129 
1130 	/* Protection of the PI data structures: */
1131 	raw_spinlock_t			pi_lock;
1132 
1133 	struct wake_q_node		wake_q;
1134 
1135 #ifdef CONFIG_RT_MUTEXES
1136 	/* PI waiters blocked on a rt_mutex held by this task: */
1137 	struct rb_root_cached		pi_waiters;
1138 	/* Updated under owner's pi_lock and rq lock */
1139 	struct task_struct		*pi_top_task;
1140 	/* Deadlock detection and priority inheritance handling: */
1141 	struct rt_mutex_waiter		*pi_blocked_on;
1142 #endif
1143 
1144 #ifdef CONFIG_DEBUG_MUTEXES
1145 	/* Mutex deadlock detection: */
1146 	struct mutex_waiter		*blocked_on;
1147 #endif
1148 
1149 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1150 	int				non_block_count;
1151 #endif
1152 
1153 #ifdef CONFIG_TRACE_IRQFLAGS
1154 	struct irqtrace_events		irqtrace;
1155 	unsigned int			hardirq_threaded;
1156 	u64				hardirq_chain_key;
1157 	int				softirqs_enabled;
1158 	int				softirq_context;
1159 	int				irq_config;
1160 #endif
1161 #ifdef CONFIG_PREEMPT_RT
1162 	int				softirq_disable_cnt;
1163 #endif
1164 
1165 #ifdef CONFIG_LOCKDEP
1166 # define MAX_LOCK_DEPTH			48UL
1167 	u64				curr_chain_key;
1168 	int				lockdep_depth;
1169 	unsigned int			lockdep_recursion;
1170 	struct held_lock		held_locks[MAX_LOCK_DEPTH];
1171 #endif
1172 
1173 #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1174 	unsigned int			in_ubsan;
1175 #endif
1176 
1177 	/* Journalling filesystem info: */
1178 	void				*journal_info;
1179 
1180 	/* Stacked block device info: */
1181 	struct bio_list			*bio_list;
1182 
1183 	/* Stack plugging: */
1184 	struct blk_plug			*plug;
1185 
1186 	/* VM state: */
1187 	struct reclaim_state		*reclaim_state;
1188 
1189 	struct backing_dev_info		*backing_dev_info;
1190 
1191 	struct io_context		*io_context;
1192 
1193 #ifdef CONFIG_COMPACTION
1194 	struct capture_control		*capture_control;
1195 #endif
1196 	/* Ptrace state: */
1197 	unsigned long			ptrace_message;
1198 	kernel_siginfo_t		*last_siginfo;
1199 
1200 	struct task_io_accounting	ioac;
1201 #ifdef CONFIG_PSI
1202 	/* Pressure stall state */
1203 	unsigned int			psi_flags;
1204 #endif
1205 #ifdef CONFIG_TASK_XACCT
1206 	/* Accumulated RSS usage: */
1207 	u64				acct_rss_mem1;
1208 	/* Accumulated virtual memory usage: */
1209 	u64				acct_vm_mem1;
1210 	/* stime + utime since last update: */
1211 	u64				acct_timexpd;
1212 #endif
1213 #ifdef CONFIG_CPUSETS
1214 	/* Protected by ->alloc_lock: */
1215 	nodemask_t			mems_allowed;
1216 	/* Sequence number to catch updates: */
1217 	seqcount_spinlock_t		mems_allowed_seq;
1218 	int				cpuset_mem_spread_rotor;
1219 	int				cpuset_slab_spread_rotor;
1220 #endif
1221 #ifdef CONFIG_CGROUPS
1222 	/* Control Group info protected by css_set_lock: */
1223 	struct css_set __rcu		*cgroups;
1224 	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
1225 	struct list_head		cg_list;
1226 #endif
1227 #ifdef CONFIG_X86_CPU_RESCTRL
1228 	u32				closid;
1229 	u32				rmid;
1230 #endif
1231 #ifdef CONFIG_FUTEX
1232 	struct robust_list_head __user	*robust_list;
1233 #ifdef CONFIG_COMPAT
1234 	struct compat_robust_list_head __user *compat_robust_list;
1235 #endif
1236 	struct list_head		pi_state_list;
1237 	struct futex_pi_state		*pi_state_cache;
1238 	struct mutex			futex_exit_mutex;
1239 	unsigned int			futex_state;
1240 #endif
1241 #ifdef CONFIG_PERF_EVENTS
1242 	struct perf_event_context	*perf_event_ctxp;
1243 	struct mutex			perf_event_mutex;
1244 	struct list_head		perf_event_list;
1245 #endif
1246 #ifdef CONFIG_DEBUG_PREEMPT
1247 	unsigned long			preempt_disable_ip;
1248 #endif
1249 #ifdef CONFIG_NUMA
1250 	/* Protected by alloc_lock: */
1251 	struct mempolicy		*mempolicy;
1252 	short				il_prev;
1253 	short				pref_node_fork;
1254 #endif
1255 #ifdef CONFIG_NUMA_BALANCING
1256 	int				numa_scan_seq;
1257 	unsigned int			numa_scan_period;
1258 	unsigned int			numa_scan_period_max;
1259 	int				numa_preferred_nid;
1260 	unsigned long			numa_migrate_retry;
1261 	/* Migration stamp: */
1262 	u64				node_stamp;
1263 	u64				last_task_numa_placement;
1264 	u64				last_sum_exec_runtime;
1265 	struct callback_head		numa_work;
1266 
1267 	/*
1268 	 * This pointer is only modified for current in syscall and
1269 	 * pagefault context (and for tasks being destroyed), so it can be read
1270 	 * from any of the following contexts:
1271 	 *  - RCU read-side critical section
1272 	 *  - current->numa_group from everywhere
1273 	 *  - task's runqueue locked, task not running
1274 	 */
1275 	struct numa_group __rcu		*numa_group;
1276 
1277 	/*
1278 	 * numa_faults is an array split into four regions:
1279 	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1280 	 * in this precise order.
1281 	 *
1282 	 * faults_memory: Exponential decaying average of faults on a per-node
1283 	 * basis. Scheduling placement decisions are made based on these
1284 	 * counts. The values remain static for the duration of a PTE scan.
1285 	 * faults_cpu: Track the nodes the process was running on when a NUMA
1286 	 * hinting fault was incurred.
1287 	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1288 	 * during the current scan window. When the scan completes, the counts
1289 	 * in faults_memory and faults_cpu decay and these values are copied.
1290 	 */
1291 	unsigned long			*numa_faults;
1292 	unsigned long			total_numa_faults;
1293 
1294 	/*
1295 	 * numa_faults_locality tracks if faults recorded during the last
1296 	 * scan window were remote/local or failed to migrate. The task scan
1297 	 * period is adapted based on the locality of the faults with different
1298 	 * weights depending on whether they were shared or private faults
1299 	 */
1300 	unsigned long			numa_faults_locality[3];
1301 
1302 	unsigned long			numa_pages_migrated;
1303 #endif /* CONFIG_NUMA_BALANCING */
1304 
1305 #ifdef CONFIG_RSEQ
1306 	struct rseq __user *rseq;
1307 	u32 rseq_len;
1308 	u32 rseq_sig;
1309 	/*
1310 	 * RmW on rseq_event_mask must be performed atomically
1311 	 * with respect to preemption.
1312 	 */
1313 	unsigned long rseq_event_mask;
1314 #endif
1315 
1316 #ifdef CONFIG_SCHED_MM_CID
1317 	int				mm_cid;		/* Current cid in mm */
1318 	int				last_mm_cid;	/* Most recent cid in mm */
1319 	int				migrate_from_cpu;
1320 	int				mm_cid_active;	/* Whether cid bitmap is active */
1321 	struct callback_head		cid_work;
1322 #endif
1323 
1324 	struct tlbflush_unmap_batch	tlb_ubc;
1325 
1326 	/* Cache last used pipe for splice(): */
1327 	struct pipe_inode_info		*splice_pipe;
1328 
1329 	struct page_frag		task_frag;
1330 
1331 #ifdef CONFIG_TASK_DELAY_ACCT
1332 	struct task_delay_info		*delays;
1333 #endif
1334 
1335 #ifdef CONFIG_FAULT_INJECTION
1336 	int				make_it_fail;
1337 	unsigned int			fail_nth;
1338 #endif
1339 	/*
1340 	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
1341 	 * balance_dirty_pages() for a dirty throttling pause:
1342 	 */
1343 	int				nr_dirtied;
1344 	int				nr_dirtied_pause;
1345 	/* Start of a write-and-pause period: */
1346 	unsigned long			dirty_paused_when;
1347 
1348 #ifdef CONFIG_LATENCYTOP
1349 	int				latency_record_count;
1350 	struct latency_record		latency_record[LT_SAVECOUNT];
1351 #endif
1352 	/*
1353 	 * Time slack values; these are used to round up poll() and
1354 	 * select() etc timeout values. These are in nanoseconds.
1355 	 */
1356 	u64				timer_slack_ns;
1357 	u64				default_timer_slack_ns;
1358 
1359 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
1360 	unsigned int			kasan_depth;
1361 #endif
1362 
1363 #ifdef CONFIG_KCSAN
1364 	struct kcsan_ctx		kcsan_ctx;
1365 #ifdef CONFIG_TRACE_IRQFLAGS
1366 	struct irqtrace_events		kcsan_save_irqtrace;
1367 #endif
1368 #ifdef CONFIG_KCSAN_WEAK_MEMORY
1369 	int				kcsan_stack_depth;
1370 #endif
1371 #endif
1372 
1373 #ifdef CONFIG_KMSAN
1374 	struct kmsan_ctx		kmsan_ctx;
1375 #endif
1376 
1377 #if IS_ENABLED(CONFIG_KUNIT)
1378 	struct kunit			*kunit_test;
1379 #endif
1380 
1381 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1382 	/* Index of current stored address in ret_stack: */
1383 	int				curr_ret_stack;
1384 	int				curr_ret_depth;
1385 
1386 	/* Stack of return addresses for return function tracing: */
1387 	struct ftrace_ret_stack		*ret_stack;
1388 
1389 	/* Timestamp for last schedule: */
1390 	unsigned long long		ftrace_timestamp;
1391 
1392 	/*
1393 	 * Number of functions that haven't been traced
1394 	 * because of depth overrun:
1395 	 */
1396 	atomic_t			trace_overrun;
1397 
1398 	/* Pause tracing: */
1399 	atomic_t			tracing_graph_pause;
1400 #endif
1401 
1402 #ifdef CONFIG_TRACING
1403 	/* Bitmask and counter of trace recursion: */
1404 	unsigned long			trace_recursion;
1405 #endif /* CONFIG_TRACING */
1406 
1407 #ifdef CONFIG_KCOV
1408 	/* See kernel/kcov.c for more details. */
1409 
1410 	/* Coverage collection mode enabled for this task (0 if disabled): */
1411 	unsigned int			kcov_mode;
1412 
1413 	/* Size of the kcov_area: */
1414 	unsigned int			kcov_size;
1415 
1416 	/* Buffer for coverage collection: */
1417 	void				*kcov_area;
1418 
1419 	/* KCOV descriptor wired with this task or NULL: */
1420 	struct kcov			*kcov;
1421 
1422 	/* KCOV common handle for remote coverage collection: */
1423 	u64				kcov_handle;
1424 
1425 	/* KCOV sequence number: */
1426 	int				kcov_sequence;
1427 
1428 	/* Collect coverage from softirq context: */
1429 	unsigned int			kcov_softirq;
1430 #endif
1431 
1432 #ifdef CONFIG_MEMCG
1433 	struct mem_cgroup		*memcg_in_oom;
1434 	gfp_t				memcg_oom_gfp_mask;
1435 	int				memcg_oom_order;
1436 
1437 	/* Number of pages to reclaim on returning to userland: */
1438 	unsigned int			memcg_nr_pages_over_high;
1439 
1440 	/* Used by memcontrol for targeted memcg charge: */
1441 	struct mem_cgroup		*active_memcg;
1442 #endif
1443 
1444 #ifdef CONFIG_BLK_CGROUP
1445 	struct gendisk			*throttle_disk;
1446 #endif
1447 
1448 #ifdef CONFIG_UPROBES
1449 	struct uprobe_task		*utask;
1450 #endif
1451 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1452 	unsigned int			sequential_io;
1453 	unsigned int			sequential_io_avg;
1454 #endif
1455 	struct kmap_ctrl		kmap_ctrl;
1456 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1457 	unsigned long			task_state_change;
1458 # ifdef CONFIG_PREEMPT_RT
1459 	unsigned long			saved_state_change;
1460 # endif
1461 #endif
1462 	struct rcu_head			rcu;
1463 	refcount_t			rcu_users;
1464 	int				pagefault_disabled;
1465 #ifdef CONFIG_MMU
1466 	struct task_struct		*oom_reaper_list;
1467 	struct timer_list		oom_reaper_timer;
1468 #endif
1469 #ifdef CONFIG_VMAP_STACK
1470 	struct vm_struct		*stack_vm_area;
1471 #endif
1472 #ifdef CONFIG_THREAD_INFO_IN_TASK
1473 	/* A live task holds one reference: */
1474 	refcount_t			stack_refcount;
1475 #endif
1476 #ifdef CONFIG_LIVEPATCH
1477 	int patch_state;
1478 #endif
1479 #ifdef CONFIG_SECURITY
1480 	/* Used by LSM modules for access restriction: */
1481 	void				*security;
1482 #endif
1483 #ifdef CONFIG_BPF_SYSCALL
1484 	/* Used by BPF task local storage */
1485 	struct bpf_local_storage __rcu	*bpf_storage;
1486 	/* Used for BPF run context */
1487 	struct bpf_run_ctx		*bpf_ctx;
1488 #endif
1489 
1490 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1491 	unsigned long			lowest_stack;
1492 	unsigned long			prev_lowest_stack;
1493 #endif
1494 
1495 #ifdef CONFIG_X86_MCE
1496 	void __user			*mce_vaddr;
1497 	__u64				mce_kflags;
1498 	u64				mce_addr;
1499 	__u64				mce_ripv : 1,
1500 					mce_whole_page : 1,
1501 					__mce_reserved : 62;
1502 	struct callback_head		mce_kill_me;
1503 	int				mce_count;
1504 #endif
1505 
1506 #ifdef CONFIG_KRETPROBES
1507 	struct llist_head               kretprobe_instances;
1508 #endif
1509 #ifdef CONFIG_RETHOOK
1510 	struct llist_head               rethooks;
1511 #endif
1512 
1513 #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
1514 	/*
1515 	 * If L1D flush is supported on mm context switch,
1516 	 * then we use this callback head to queue kill work
1517 	 * to kill tasks that are not running on SMT-disabled
1518 	 * cores.
1519 	 */
1520 	struct callback_head		l1d_flush_kill;
1521 #endif
1522 
1523 #ifdef CONFIG_RV
1524 	/*
1525 	 * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
1526 	 * If we find justification for more monitors, we can think
1527 	 * about adding more or developing a dynamic method. So far,
1528 	 * none of these are justified.
1529 	 */
1530 	union rv_task_monitor		rv[RV_PER_TASK_MONITORS];
1531 #endif
1532 
1533 #ifdef CONFIG_USER_EVENTS
1534 	struct user_event_mm		*user_event_mm;
1535 #endif
1536 
1537 	/*
1538 	 * New fields for task_struct should be added above here, so that
1539 	 * they are included in the randomized portion of task_struct.
1540 	 */
1541 	randomized_struct_fields_end
1542 
1543 	/* CPU-specific state of this task: */
1544 	struct thread_struct		thread;
1545 
1546 	/*
1547 	 * WARNING: on x86, 'thread_struct' contains a variable-sized
1548 	 * structure.  It *MUST* be at the end of 'task_struct'.
1549 	 *
1550 	 * Do not put anything below here!
1551 	 */
1552 };
1553 
1554 static inline struct pid *task_pid(struct task_struct *task)
1555 {
1556 	return task->thread_pid;
1557 }
1558 
1559 /*
1560  * the helpers to get the task's different pids as they are seen
1561  * from various namespaces
1562  *
1563  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1564  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1565  *                     current.
1566  * task_xid_nr_ns()  : id seen from the ns specified;
1567  *
1568  * see also pid_nr() etc in include/linux/pid.h
1569  */
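
/*
 * Illustration (the numbers are made up): for a task living in a nested PID
 * namespace, task_pid_nr(p) might return 1234 (the id as seen from the init
 * namespace), task_pid_vnr(p) might return 7 (the id as seen from current's
 * namespace), and task_pid_nr_ns(p, ns) reports the id relative to the
 * explicitly given namespace 'ns'.
 */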
1570 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1571 
1572 static inline pid_t task_pid_nr(struct task_struct *tsk)
1573 {
1574 	return tsk->pid;
1575 }
1576 
1577 static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1578 {
1579 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1580 }
1581 
1582 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1583 {
1584 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1585 }
1586 
1587 
1588 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1589 {
1590 	return tsk->tgid;
1591 }
1592 
1593 /**
1594  * pid_alive - check that a task structure is not stale
1595  * @p: Task structure to be checked.
1596  *
1597  * Test if a process is not yet dead (at most zombie state).
1598  * If pid_alive fails, then pointers within the task structure
1599  * can be stale and must not be dereferenced.
1600  *
1601  * Return: 1 if the process is alive. 0 otherwise.
1602  */
1603 static inline int pid_alive(const struct task_struct *p)
1604 {
1605 	return p->thread_pid != NULL;
1606 }
1607 
1608 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1609 {
1610 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1611 }
1612 
1613 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1614 {
1615 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1616 }
1617 
1618 
1619 static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1620 {
1621 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1622 }
1623 
1624 static inline pid_t task_session_vnr(struct task_struct *tsk)
1625 {
1626 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1627 }
1628 
1629 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1630 {
1631 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1632 }
1633 
1634 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1635 {
1636 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1637 }
1638 
1639 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1640 {
1641 	pid_t pid = 0;
1642 
1643 	rcu_read_lock();
1644 	if (pid_alive(tsk))
1645 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1646 	rcu_read_unlock();
1647 
1648 	return pid;
1649 }
1650 
1651 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1652 {
1653 	return task_ppid_nr_ns(tsk, &init_pid_ns);
1654 }
1655 
1656 /* Obsolete, do not use: */
1657 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1658 {
1659 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1660 }
1661 
1662 #define TASK_REPORT_IDLE	(TASK_REPORT + 1)
1663 #define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
1664 
1665 static inline unsigned int __task_state_index(unsigned int tsk_state,
1666 					      unsigned int tsk_exit_state)
1667 {
1668 	unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;
1669 
1670 	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1671 
1672 	if (tsk_state == TASK_IDLE)
1673 		state = TASK_REPORT_IDLE;
1674 
1675 	/*
1676 	 * We're lying here, but rather than expose a completely new task state
1677 	 * to userspace, we can make this appear as if the task has gone through
1678 	 * a regular rt_mutex_lock() call.
1679 	 */
1680 	if (tsk_state == TASK_RTLOCK_WAIT)
1681 		state = TASK_UNINTERRUPTIBLE;
1682 
1683 	return fls(state);
1684 }
1685 
1686 static inline unsigned int task_state_index(struct task_struct *tsk)
1687 {
1688 	return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
1689 }
1690 
1691 static inline char task_index_to_char(unsigned int state)
1692 {
1693 	static const char state_char[] = "RSDTtXZPI";
1694 
1695 	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1696 
1697 	return state_char[state];
1698 }
1699 
1700 static inline char task_state_to_char(struct task_struct *tsk)
1701 {
1702 	return task_index_to_char(task_state_index(tsk));
1703 }
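
/*
 * Worked example of the mapping above (added note): TASK_INTERRUPTIBLE is
 * 0x0001, so __task_state_index() returns fls(0x0001) == 1 and
 * task_index_to_char() yields state_char[1] == 'S'. TASK_IDLE is reported
 * as TASK_REPORT_IDLE == 0x80, giving index 8, i.e. 'I'.
 */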
1704 
1705 /**
1706  * is_global_init - check if a task structure is init. Since init
1707  * is free to have sub-threads, we need to check tgid.
1708  * @tsk: Task structure to be checked.
1709  *
1710  * Check if a task structure is the first user space task the kernel created.
1711  *
1712  * Return: 1 if the task structure is init. 0 otherwise.
1713  */
1714 static inline int is_global_init(struct task_struct *tsk)
1715 {
1716 	return task_tgid_nr(tsk) == 1;
1717 }
1718 
1719 extern struct pid *cad_pid;
1720 
1721 /*
1722  * Per process flags
1723  */
1724 #define PF_VCPU			0x00000001	/* I'm a virtual CPU */
1725 #define PF_IDLE			0x00000002	/* I am an IDLE thread */
1726 #define PF_EXITING		0x00000004	/* Getting shut down */
1727 #define PF_POSTCOREDUMP		0x00000008	/* Coredumps should ignore this task */
1728 #define PF_IO_WORKER		0x00000010	/* Task is an IO worker */
1729 #define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
1730 #define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
1731 #define PF_MCE_PROCESS		0x00000080      /* Process policy on mce errors */
1732 #define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
1733 #define PF_DUMPCORE		0x00000200	/* Dumped core */
1734 #define PF_SIGNALED		0x00000400	/* Killed by a signal */
1735 #define PF_MEMALLOC		0x00000800	/* Allocating memory */
1736 #define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
1737 #define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
1738 #define PF_USER_WORKER		0x00004000	/* Kernel thread cloned from userspace thread */
1739 #define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
1740 #define PF__HOLE__00010000	0x00010000
1741 #define PF_KSWAPD		0x00020000	/* I am kswapd */
1742 #define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
1743 #define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
1744 #define PF_LOCAL_THROTTLE	0x00100000	/* Throttle writes only against the bdi I write to,
1745 						 * I am cleaning dirty pages from some other bdi. */
1746 #define PF_KTHREAD		0x00200000	/* I am a kernel thread */
1747 #define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
1748 #define PF__HOLE__00800000	0x00800000
1749 #define PF__HOLE__01000000	0x01000000
1750 #define PF__HOLE__02000000	0x02000000
1751 #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_mask */
1752 #define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
1753 #define PF_MEMALLOC_PIN		0x10000000	/* Allocation context constrained to zones which allow long term pinning. */
1754 #define PF__HOLE__20000000	0x20000000
1755 #define PF__HOLE__40000000	0x40000000
1756 #define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */
1757 
1758 /*
1759  * Only the _current_ task can read/write to tsk->flags, but other
1760  * tasks can access tsk->flags in read-only mode, for example
1761  * with tsk_used_math (like during threaded core dumping).
1762  * There is, however, an exception to this rule during ptrace
1763  * or during fork: the ptracer task is allowed to write to the
1764  * child->flags of its traced child (same goes for fork, the parent
1765  * can write to the child->flags), because we're guaranteed the
1766  * child is not running and in turn not changing child->flags
1767  * at the same time the parent does it.
1768  */
1769 #define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
1770 #define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
1771 #define clear_used_math()			clear_stopped_child_used_math(current)
1772 #define set_used_math()				set_stopped_child_used_math(current)
1773 
1774 #define conditional_stopped_child_used_math(condition, child) \
1775 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1776 
1777 #define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)
1778 
1779 #define copy_to_stopped_child_used_math(child) \
1780 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1781 
1782 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1783 #define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
1784 #define used_math()				tsk_used_math(current)
1785 
1786 static __always_inline bool is_percpu_thread(void)
1787 {
1788 #ifdef CONFIG_SMP
1789 	return (current->flags & PF_NO_SETAFFINITY) &&
1790 		(current->nr_cpus_allowed == 1);
1791 #else
1792 	return true;
1793 #endif
1794 }
1795 
1796 /* Per-process atomic flags. */
1797 #define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
1798 #define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
1799 #define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
1800 #define PFA_SPEC_SSB_DISABLE		3	/* Speculative Store Bypass disabled */
1801 #define PFA_SPEC_SSB_FORCE_DISABLE	4	/* Speculative Store Bypass force disabled*/
1802 #define PFA_SPEC_IB_DISABLE		5	/* Indirect branch speculation restricted */
1803 #define PFA_SPEC_IB_FORCE_DISABLE	6	/* Indirect branch speculation permanently restricted */
1804 #define PFA_SPEC_SSB_NOEXEC		7	/* Speculative Store Bypass clear on execve() */
1805 
1806 #define TASK_PFA_TEST(name, func)					\
1807 	static inline bool task_##func(struct task_struct *p)		\
1808 	{ return test_bit(PFA_##name, &p->atomic_flags); }
1809 
1810 #define TASK_PFA_SET(name, func)					\
1811 	static inline void task_set_##func(struct task_struct *p)	\
1812 	{ set_bit(PFA_##name, &p->atomic_flags); }
1813 
1814 #define TASK_PFA_CLEAR(name, func)					\
1815 	static inline void task_clear_##func(struct task_struct *p)	\
1816 	{ clear_bit(PFA_##name, &p->atomic_flags); }
1817 
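/*
 * A minimal sketch of what the invocations below generate: for example,
 * TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) expands (roughly) to
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * Flags that must never be cleared again once set (NO_NEW_PRIVS and the
 * SPEC_*_FORCE_DISABLE bits) deliberately get no TASK_PFA_CLEAR() helper.
 */
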
1818 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1819 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1820 
1821 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1822 TASK_PFA_SET(SPREAD_PAGE, spread_page)
1823 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1824 
1825 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1826 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1827 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1828 
1829 TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1830 TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1831 TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1832 
1833 TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1834 TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1835 TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1836 
1837 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1838 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1839 
1840 TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1841 TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1842 TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1843 
1844 TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1845 TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1846 
1847 static inline void
1848 current_restore_flags(unsigned long orig_flags, unsigned long flags)
1849 {
1850 	current->flags &= ~flags;
1851 	current->flags |= orig_flags & flags;
1852 }
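
/*
 * A minimal sketch of the save/modify/restore pattern this helper supports
 * (roughly what memalloc_nofs_save()/memalloc_nofs_restore() boil down to):
 *
 *	unsigned int pflags = current->flags & PF_MEMALLOC_NOFS;
 *
 *	current->flags |= PF_MEMALLOC_NOFS;
 *	...					// allocations inherit GFP_NOFS here
 *	current_restore_flags(pflags, PF_MEMALLOC_NOFS);
 */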
1853 
1854 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1855 extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
1856 #ifdef CONFIG_SMP
1857 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1858 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1859 extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
1860 extern void release_user_cpus_ptr(struct task_struct *p);
1861 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
1862 extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
1863 extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
1864 #else
1865 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1866 {
1867 }
1868 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1869 {
1870 	if (!cpumask_test_cpu(0, new_mask))
1871 		return -EINVAL;
1872 	return 0;
1873 }
1874 static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
1875 {
1876 	if (src->user_cpus_ptr)
1877 		return -EINVAL;
1878 	return 0;
1879 }
1880 static inline void release_user_cpus_ptr(struct task_struct *p)
1881 {
1882 	WARN_ON(p->user_cpus_ptr);
1883 }
1884 
1885 static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
1886 {
1887 	return 0;
1888 }
1889 #endif
1890 
1891 extern int yield_to(struct task_struct *p, bool preempt);
1892 extern void set_user_nice(struct task_struct *p, long nice);
1893 extern int task_prio(const struct task_struct *p);
1894 
1895 /**
1896  * task_nice - return the nice value of a given task.
1897  * @p: the task in question.
1898  *
1899  * Return: The nice value [ -20 ... 0 ... 19 ].
1900  */
1901 static inline int task_nice(const struct task_struct *p)
1902 {
1903 	return PRIO_TO_NICE((p)->static_prio);
1904 }
1905 
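/*
 * For reference, with the usual priority layout from <linux/sched/prio.h>
 * (PRIO_TO_NICE(prio) == (prio) - DEFAULT_PRIO and DEFAULT_PRIO == 120),
 * a static_prio of 120 is nice 0, 100 maps to nice -20 and 139 to nice 19.
 */
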
1906 extern int can_nice(const struct task_struct *p, const int nice);
1907 extern int task_curr(const struct task_struct *p);
1908 extern int idle_cpu(int cpu);
1909 extern int available_idle_cpu(int cpu);
1910 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1911 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1912 extern void sched_set_fifo(struct task_struct *p);
1913 extern void sched_set_fifo_low(struct task_struct *p);
1914 extern void sched_set_normal(struct task_struct *p, int nice);
1915 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1916 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1917 extern struct task_struct *idle_task(int cpu);
1918 
1919 /**
1920  * is_idle_task - is the specified task an idle task?
1921  * @p: the task in question.
1922  *
1923  * Return: 1 if @p is an idle task. 0 otherwise.
1924  */
1925 static __always_inline bool is_idle_task(const struct task_struct *p)
1926 {
1927 	return !!(p->flags & PF_IDLE);
1928 }
1929 
1930 extern struct task_struct *curr_task(int cpu);
1931 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1932 
1933 void yield(void);
1934 
1935 union thread_union {
1936 #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1937 	struct task_struct task;
1938 #endif
1939 #ifndef CONFIG_THREAD_INFO_IN_TASK
1940 	struct thread_info thread_info;
1941 #endif
1942 	unsigned long stack[THREAD_SIZE/sizeof(long)];
1943 };
1944 
1945 #ifndef CONFIG_THREAD_INFO_IN_TASK
1946 extern struct thread_info init_thread_info;
1947 #endif
1948 
1949 extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1950 
1951 #ifdef CONFIG_THREAD_INFO_IN_TASK
1952 # define task_thread_info(task)	(&(task)->thread_info)
1953 #elif !defined(__HAVE_THREAD_FUNCTIONS)
1954 # define task_thread_info(task)	((struct thread_info *)(task)->stack)
1955 #endif
1956 
1957 /*
1958  * find a task by one of its numerical ids
1959  *
1960  * find_task_by_pid_ns():
1961  *      finds a task by its pid in the specified namespace
1962  * find_task_by_vpid():
1963  *      finds a task by its virtual pid
1964  *
1965  * see also find_vpid() etc in include/linux/pid.h
1966  */
1967 
1968 extern struct task_struct *find_task_by_vpid(pid_t nr);
1969 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1970 
1971 /*
1972  * find a task by its virtual pid and get the task struct
1973  */
1974 extern struct task_struct *find_get_task_by_vpid(pid_t nr);
1975 
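/*
 * A minimal usage sketch: find_task_by_vpid() does not take a reference, so
 * callers typically look the task up under RCU and grab a reference before
 * leaving the read-side critical section:
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(pid);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *
 * find_get_task_by_vpid() packages this lookup-and-get sequence.
 */
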
1976 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1977 extern int wake_up_process(struct task_struct *tsk);
1978 extern void wake_up_new_task(struct task_struct *tsk);
1979 
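/*
 * A minimal sketch of the canonical sleep/wakeup pairing ('condition' is a
 * stand-in for whatever the sleeper is waiting on).  The state is set before
 * the condition is re-checked so a concurrent wake_up_process() is not lost:
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The waker makes 'condition' true and then calls wake_up_process(sleeper).
 */
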
1980 #ifdef CONFIG_SMP
1981 extern void kick_process(struct task_struct *tsk);
1982 #else
1983 static inline void kick_process(struct task_struct *tsk) { }
1984 #endif
1985 
1986 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1987 
1988 static inline void set_task_comm(struct task_struct *tsk, const char *from)
1989 {
1990 	__set_task_comm(tsk, from, false);
1991 }
1992 
1993 extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1994 #define get_task_comm(buf, tsk) ({			\
1995 	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
1996 	__get_task_comm(buf, sizeof(buf), tsk);		\
1997 })
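
/*
 * The BUILD_BUG_ON() above only works when the destination is a real array of
 * TASK_COMM_LEN bytes; with a plain pointer, sizeof() would yield the pointer
 * size and the build would fail.  A minimal usage sketch:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 */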
1998 
1999 #ifdef CONFIG_SMP
2000 static __always_inline void scheduler_ipi(void)
2001 {
2002 	/*
2003 	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
2004 	 * TIF_NEED_RESCHED remotely (for the first time) will also send
2005 	 * this IPI.
2006 	 */
2007 	preempt_fold_need_resched();
2008 }
2009 extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
2010 #else
2011 static inline void scheduler_ipi(void) { }
2012 static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2013 {
2014 	return 1;
2015 }
2016 #endif
2017 
2018 /*
2019  * Set thread flags in another task's structure.
2020  * See asm/thread_info.h for the available TIF_xxxx flags:
2021  */
2022 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2023 {
2024 	set_ti_thread_flag(task_thread_info(tsk), flag);
2025 }
2026 
2027 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2028 {
2029 	clear_ti_thread_flag(task_thread_info(tsk), flag);
2030 }
2031 
2032 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
2033 					  bool value)
2034 {
2035 	update_ti_thread_flag(task_thread_info(tsk), flag, value);
2036 }
2037 
2038 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2039 {
2040 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2041 }
2042 
2043 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2044 {
2045 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2046 }
2047 
2048 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2049 {
2050 	return test_ti_thread_flag(task_thread_info(tsk), flag);
2051 }
2052 
2053 static inline void set_tsk_need_resched(struct task_struct *tsk)
2054 {
2055 	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
2056 }
2057 
2058 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2059 {
2060 	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
2061 }
2062 
2063 static inline int test_tsk_need_resched(struct task_struct *tsk)
2064 {
2065 	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
2066 }
2067 
2068 /*
2069  * cond_resched() and cond_resched_lock(): latency reduction via
2070  * explicit rescheduling in places that are safe. The return
2071  * value indicates whether a reschedule was actually done.
2072  * cond_resched_lock() drops the spinlock before scheduling and re-acquires it.
2073  */
2074 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
2075 extern int __cond_resched(void);
2076 
2077 #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
2078 
2079 void sched_dynamic_klp_enable(void);
2080 void sched_dynamic_klp_disable(void);
2081 
2082 DECLARE_STATIC_CALL(cond_resched, __cond_resched);
2083 
2084 static __always_inline int _cond_resched(void)
2085 {
2086 	return static_call_mod(cond_resched)();
2087 }
2088 
2089 #elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
2090 
2091 extern int dynamic_cond_resched(void);
2092 
2093 static __always_inline int _cond_resched(void)
2094 {
2095 	return dynamic_cond_resched();
2096 }
2097 
2098 #else /* !CONFIG_PREEMPTION */
2099 
2100 static inline int _cond_resched(void)
2101 {
2102 	klp_sched_try_switch();
2103 	return __cond_resched();
2104 }
2105 
2106 #endif /* PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
2107 
2108 #else /* CONFIG_PREEMPTION && !CONFIG_PREEMPT_DYNAMIC */
2109 
2110 static inline int _cond_resched(void)
2111 {
2112 	klp_sched_try_switch();
2113 	return 0;
2114 }
2115 
2116 #endif /* !CONFIG_PREEMPTION || CONFIG_PREEMPT_DYNAMIC */
2117 
2118 #define cond_resched() ({			\
2119 	__might_resched(__FILE__, __LINE__, 0);	\
2120 	_cond_resched();			\
2121 })
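
/*
 * A minimal usage sketch: long-running loops in process context drop a
 * cond_resched() into each iteration so that, on non-preemptible kernels,
 * other runnable tasks still get CPU time ('process_item' is illustrative):
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */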
2122 
2123 extern int __cond_resched_lock(spinlock_t *lock);
2124 extern int __cond_resched_rwlock_read(rwlock_t *lock);
2125 extern int __cond_resched_rwlock_write(rwlock_t *lock);
2126 
2127 #define MIGHT_RESCHED_RCU_SHIFT		8
2128 #define MIGHT_RESCHED_PREEMPT_MASK	((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
2129 
2130 #ifndef CONFIG_PREEMPT_RT
2131 /*
2132  * Non-RT kernels have an elevated preempt count due to the held lock,
2133  * but are not allowed to be inside an RCU read-side critical section.
2134  */
2135 # define PREEMPT_LOCK_RESCHED_OFFSETS	PREEMPT_LOCK_OFFSET
2136 #else
2137 /*
2138  * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
2139  * cond_resched*lock() has to take that into account because it checks for
2140  * preempt_count() and rcu_preempt_depth().
2141  */
2142 # define PREEMPT_LOCK_RESCHED_OFFSETS	\
2143 	(PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
2144 #endif
2145 
2146 #define cond_resched_lock(lock) ({						\
2147 	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
2148 	__cond_resched_lock(lock);						\
2149 })
2150 
2151 #define cond_resched_rwlock_read(lock) ({					\
2152 	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
2153 	__cond_resched_rwlock_read(lock);					\
2154 })
2155 
2156 #define cond_resched_rwlock_write(lock) ({					\
2157 	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
2158 	__cond_resched_rwlock_write(lock);					\
2159 })
2160 
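/*
 * A minimal sketch of the *_lock() variants ('my_lock' and the work helpers
 * are illustrative): when a reschedule is needed (or the lock is contended),
 * the lock is dropped, the task may schedule, and the lock is re-acquired
 * before the macro returns:
 *
 *	spin_lock(&my_lock);
 *	while (more_work()) {
 *		do_locked_work();
 *		cond_resched_lock(&my_lock);
 *	}
 *	spin_unlock(&my_lock);
 */
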
2161 static inline void cond_resched_rcu(void)
2162 {
2163 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2164 	rcu_read_unlock();
2165 	cond_resched();
2166 	rcu_read_lock();
2167 #endif
2168 }
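
/*
 * A usage sketch: cond_resched_rcu() momentarily leaves the RCU read-side
 * critical section, so protection lapses across the call and the caller must
 * not keep using RCU-protected pointers obtained beforehand (hold a reference
 * or restart the walk instead; 'scan_one_bucket' is illustrative):
 *
 *	rcu_read_lock();
 *	while (scan_one_bucket())
 *		cond_resched_rcu();
 *	rcu_read_unlock();
 */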
2169 
2170 #ifdef CONFIG_PREEMPT_DYNAMIC
2171 
2172 extern bool preempt_model_none(void);
2173 extern bool preempt_model_voluntary(void);
2174 extern bool preempt_model_full(void);
2175 
2176 #else
2177 
2178 static inline bool preempt_model_none(void)
2179 {
2180 	return IS_ENABLED(CONFIG_PREEMPT_NONE);
2181 }
2182 static inline bool preempt_model_voluntary(void)
2183 {
2184 	return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
2185 }
2186 static inline bool preempt_model_full(void)
2187 {
2188 	return IS_ENABLED(CONFIG_PREEMPT);
2189 }
2190 
2191 #endif
2192 
2193 static inline bool preempt_model_rt(void)
2194 {
2195 	return IS_ENABLED(CONFIG_PREEMPT_RT);
2196 }
2197 
2198 /*
2199  * Does the preemption model allow non-cooperative preemption?
2200  *
2201  * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
2202  * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
2203  * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
2204  * PREEMPT_NONE model.
2205  */
2206 static inline bool preempt_model_preemptible(void)
2207 {
2208 	return preempt_model_full() || preempt_model_rt();
2209 }
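
/*
 * A minimal sketch of why this helper, rather than a CONFIG_PREEMPTION check,
 * is the appropriate test on preempt-dynamic kernels:
 *
 *	if (!preempt_model_preemptible())
 *		cond_resched();		// cooperative models need explicit help
 */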
2210 
2211 /*
2212  * Does a critical section need to be broken due to another
2213  * task waiting? (Technically this does not depend on CONFIG_PREEMPTION,
2214  * but reflects a general need for low latency.)
2215  */
2216 static inline int spin_needbreak(spinlock_t *lock)
2217 {
2218 #ifdef CONFIG_PREEMPTION
2219 	return spin_is_contended(lock);
2220 #else
2221 	return 0;
2222 #endif
2223 }
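
/*
 * A usage sketch combining this with the cond_resched_lock() machinery above
 * ('my_lock' is illustrative): only pay for dropping the lock when someone is
 * actually waiting for it or a reschedule is pending:
 *
 *	if (spin_needbreak(&my_lock) || need_resched())
 *		cond_resched_lock(&my_lock);
 */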
2224 
2225 /*
2226  * Check if a rwlock is contended.
2227  * Returns non-zero if there is another task waiting on the rwlock.
2228  * Returns zero if the lock is not contended or the system / underlying
2229  * rwlock implementation does not support contention detection.
2230  * Technically does not depend on CONFIG_PREEMPTION, but a general need
2231  * for low latency.
2232  */
2233 static inline int rwlock_needbreak(rwlock_t *lock)
2234 {
2235 #ifdef CONFIG_PREEMPTION
2236 	return rwlock_is_contended(lock);
2237 #else
2238 	return 0;
2239 #endif
2240 }
2241 
2242 static __always_inline bool need_resched(void)
2243 {
2244 	return unlikely(tif_need_resched());
2245 }
2246 
2247 /*
2248  * Wrappers for p->thread_info->cpu access. No-op on UP.
2249  */
2250 #ifdef CONFIG_SMP
2251 
2252 static inline unsigned int task_cpu(const struct task_struct *p)
2253 {
2254 	return READ_ONCE(task_thread_info(p)->cpu);
2255 }
2256 
2257 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2258 
2259 #else
2260 
2261 static inline unsigned int task_cpu(const struct task_struct *p)
2262 {
2263 	return 0;
2264 }
2265 
2266 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2267 {
2268 }
2269 
2270 #endif /* CONFIG_SMP */
2271 
2272 extern bool sched_task_on_rq(struct task_struct *p);
2273 extern unsigned long get_wchan(struct task_struct *p);
2274 extern struct task_struct *cpu_curr_snapshot(int cpu);
2275 
2276 /*
2277  * In order to reduce various lock holder preemption latencies, provide an
2278  * interface to see if a vCPU is currently running or not.
2279  *
2280  * This allows us to terminate optimistic spin loops and block, analogous to
2281  * the native optimistic spin heuristic of testing if the lock owner task is
2282  * running or not.
2283  */
2284 #ifndef vcpu_is_preempted
2285 static inline bool vcpu_is_preempted(int cpu)
2286 {
2287 	return false;
2288 }
2289 #endif
2290 
2291 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2292 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2293 
2294 #ifndef TASK_SIZE_OF
2295 #define TASK_SIZE_OF(tsk)	TASK_SIZE
2296 #endif
2297 
2298 #ifdef CONFIG_SMP
2299 static inline bool owner_on_cpu(struct task_struct *owner)
2300 {
2301 	/*
2302 	 * Due to the lock holder preemption issue, we skip spinning if the
2303 	 * task is not on a CPU or its CPU is preempted.
2304 	 */
2305 	return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
2306 }
2307 
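/*
 * A minimal sketch of the optimistic spin loops mentioned above ('lock' is an
 * illustrative owner-tracking lock type): stop spinning once the owner can no
 * longer make forward progress:
 *
 *	while (READ_ONCE(lock->owner) == owner) {
 *		if (!owner_on_cpu(owner))
 *			break;		// owner not running: sleep instead
 *		cpu_relax();
 *	}
 */
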
2308 /* Returns effective CPU energy utilization, as seen by the scheduler */
2309 unsigned long sched_cpu_util(int cpu);
2310 #endif /* CONFIG_SMP */
2311 
2312 #ifdef CONFIG_RSEQ
2313 
2314 /*
2315  * Map the event mask onto the user-space ABI enum rseq_cs_flags
2316  * for direct mask checks.
2317  */
2318 enum rseq_event_mask_bits {
2319 	RSEQ_EVENT_PREEMPT_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
2320 	RSEQ_EVENT_SIGNAL_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
2321 	RSEQ_EVENT_MIGRATE_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
2322 };
2323 
2324 enum rseq_event_mask {
2325 	RSEQ_EVENT_PREEMPT	= (1U << RSEQ_EVENT_PREEMPT_BIT),
2326 	RSEQ_EVENT_SIGNAL	= (1U << RSEQ_EVENT_SIGNAL_BIT),
2327 	RSEQ_EVENT_MIGRATE	= (1U << RSEQ_EVENT_MIGRATE_BIT),
2328 };
2329 
2330 static inline void rseq_set_notify_resume(struct task_struct *t)
2331 {
2332 	if (t->rseq)
2333 		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
2334 }
2335 
2336 void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
2337 
2338 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2339 					     struct pt_regs *regs)
2340 {
2341 	if (current->rseq)
2342 		__rseq_handle_notify_resume(ksig, regs);
2343 }
2344 
2345 static inline void rseq_signal_deliver(struct ksignal *ksig,
2346 				       struct pt_regs *regs)
2347 {
2348 	preempt_disable();
2349 	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
2350 	preempt_enable();
2351 	rseq_handle_notify_resume(ksig, regs);
2352 }
2353 
2354 /* rseq_preempt() requires preemption to be disabled. */
2355 static inline void rseq_preempt(struct task_struct *t)
2356 {
2357 	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
2358 	rseq_set_notify_resume(t);
2359 }
2360 
2361 /* rseq_migrate() requires preemption to be disabled. */
2362 static inline void rseq_migrate(struct task_struct *t)
2363 {
2364 	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
2365 	rseq_set_notify_resume(t);
2366 }
2367 
2368 /*
2369  * If the parent process has a registered restartable sequences area, the
2370  * child inherits it. Unregister rseq for a clone with CLONE_VM set.
2371  */
2372 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2373 {
2374 	if (clone_flags & CLONE_VM) {
2375 		t->rseq = NULL;
2376 		t->rseq_len = 0;
2377 		t->rseq_sig = 0;
2378 		t->rseq_event_mask = 0;
2379 	} else {
2380 		t->rseq = current->rseq;
2381 		t->rseq_len = current->rseq_len;
2382 		t->rseq_sig = current->rseq_sig;
2383 		t->rseq_event_mask = current->rseq_event_mask;
2384 	}
2385 }
2386 
2387 static inline void rseq_execve(struct task_struct *t)
2388 {
2389 	t->rseq = NULL;
2390 	t->rseq_len = 0;
2391 	t->rseq_sig = 0;
2392 	t->rseq_event_mask = 0;
2393 }
2394 
2395 #else
2396 
2397 static inline void rseq_set_notify_resume(struct task_struct *t)
2398 {
2399 }
2400 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2401 					     struct pt_regs *regs)
2402 {
2403 }
2404 static inline void rseq_signal_deliver(struct ksignal *ksig,
2405 				       struct pt_regs *regs)
2406 {
2407 }
2408 static inline void rseq_preempt(struct task_struct *t)
2409 {
2410 }
2411 static inline void rseq_migrate(struct task_struct *t)
2412 {
2413 }
2414 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2415 {
2416 }
2417 static inline void rseq_execve(struct task_struct *t)
2418 {
2419 }
2420 
2421 #endif
2422 
2423 #ifdef CONFIG_DEBUG_RSEQ
2424 
2425 void rseq_syscall(struct pt_regs *regs);
2426 
2427 #else
2428 
2429 static inline void rseq_syscall(struct pt_regs *regs)
2430 {
2431 }
2432 
2433 #endif
2434 
2435 #ifdef CONFIG_SCHED_CORE
2436 extern void sched_core_free(struct task_struct *tsk);
2437 extern void sched_core_fork(struct task_struct *p);
2438 extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
2439 				unsigned long uaddr);
2440 #else
2441 static inline void sched_core_free(struct task_struct *tsk) { }
2442 static inline void sched_core_fork(struct task_struct *p) { }
2443 #endif
2444 
2445 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
2446 
2447 #endif
2448