1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_SCHED_H
3 #define _LINUX_SCHED_H
4
5 /*
6 * Define 'struct task_struct' and provide the main scheduler
7 * APIs (schedule(), wakeup variants, etc.)
8 */
9
10 #include <uapi/linux/sched.h>
11
12 #include <asm/current.h>
13 #include <asm/processor.h>
14 #include <linux/thread_info.h>
15 #include <linux/preempt.h>
16 #include <linux/cpumask_types.h>
17
18 #include <linux/cache.h>
19 #include <linux/irqflags_types.h>
20 #include <linux/smp_types.h>
21 #include <linux/pid_types.h>
22 #include <linux/sem_types.h>
23 #include <linux/shm.h>
24 #include <linux/kmsan_types.h>
25 #include <linux/mutex_types.h>
26 #include <linux/plist_types.h>
27 #include <linux/hrtimer_types.h>
28 #include <linux/timer_types.h>
29 #include <linux/seccomp_types.h>
30 #include <linux/nodemask_types.h>
31 #include <linux/refcount_types.h>
32 #include <linux/resource.h>
33 #include <linux/latencytop.h>
34 #include <linux/sched/prio.h>
35 #include <linux/sched/types.h>
36 #include <linux/signal_types.h>
37 #include <linux/spinlock.h>
38 #include <linux/syscall_user_dispatch_types.h>
39 #include <linux/mm_types_task.h>
40 #include <linux/netdevice_xmit.h>
41 #include <linux/task_io_accounting.h>
42 #include <linux/posix-timers_types.h>
43 #include <linux/restart_block.h>
44 #include <uapi/linux/rseq.h>
45 #include <linux/seqlock_types.h>
46 #include <linux/kcsan.h>
47 #include <linux/rv.h>
48 #include <linux/uidgid_types.h>
49 #include <linux/tracepoint-defs.h>
50 #include <asm/kmap_size.h>
51
52 /* task_struct member predeclarations (sorted alphabetically): */
53 struct audit_context;
54 struct bio_list;
55 struct blk_plug;
56 struct bpf_local_storage;
57 struct bpf_run_ctx;
58 struct bpf_net_context;
59 struct capture_control;
60 struct cfs_rq;
61 struct fs_struct;
62 struct futex_pi_state;
63 struct io_context;
64 struct io_uring_task;
65 struct mempolicy;
66 struct nameidata;
67 struct nsproxy;
68 struct perf_event_context;
69 struct perf_ctx_data;
70 struct pid_namespace;
71 struct pipe_inode_info;
72 struct rcu_node;
73 struct reclaim_state;
74 struct robust_list_head;
75 struct root_domain;
76 struct rq;
77 struct sched_attr;
78 struct sched_dl_entity;
79 struct seq_file;
80 struct sighand_struct;
81 struct signal_struct;
82 struct task_delay_info;
83 struct task_group;
84 struct task_struct;
85 struct user_event_mm;
86
87 #include <linux/sched/ext.h>
88
89 /*
90 * Task state bitmask. NOTE! These bits are also
91 * encoded in fs/proc/array.c: get_task_state().
92 *
93 * We have two separate sets of flags: task->__state
94 * is about runnability, while task->exit_state is
95 * about the task exiting. Confusing, but this way
96 * modifying one set can't modify the other one by
97 * mistake.
98 */
99
100 /* Used in tsk->__state: */
101 #define TASK_RUNNING 0x00000000
102 #define TASK_INTERRUPTIBLE 0x00000001
103 #define TASK_UNINTERRUPTIBLE 0x00000002
104 #define __TASK_STOPPED 0x00000004
105 #define __TASK_TRACED 0x00000008
106 /* Used in tsk->exit_state: */
107 #define EXIT_DEAD 0x00000010
108 #define EXIT_ZOMBIE 0x00000020
109 #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
110 /* Used in tsk->__state again: */
111 #define TASK_PARKED 0x00000040
112 #define TASK_DEAD 0x00000080
113 #define TASK_WAKEKILL 0x00000100
114 #define TASK_WAKING 0x00000200
115 #define TASK_NOLOAD 0x00000400
116 #define TASK_NEW 0x00000800
117 #define TASK_RTLOCK_WAIT 0x00001000
118 #define TASK_FREEZABLE 0x00002000
119 #define __TASK_FREEZABLE_UNSAFE (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
120 #define TASK_FROZEN 0x00008000
121 #define TASK_STATE_MAX 0x00010000
122
123 #define TASK_ANY (TASK_STATE_MAX-1)
124
125 /*
126 * DO NOT ADD ANY NEW USERS !
127 */
128 #define TASK_FREEZABLE_UNSAFE (TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)
129
130 /* Convenience macros for the sake of set_current_state: */
131 #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
132 #define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
133 #define TASK_TRACED __TASK_TRACED
134
135 #define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
136
137 /* Convenience macros for the sake of wake_up(): */
138 #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
139
140 /* get_task_state(): */
141 #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
142 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
143 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
144 TASK_PARKED)
145
146 #define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
147
148 #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
149 #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
150 #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
151
152 /*
153 * Special states are those that do not use the normal wait-loop pattern. See
154 * the comment with set_special_state().
155 */
156 #define is_special_task_state(state) \
157 ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | \
158 TASK_DEAD | TASK_FROZEN))
159
160 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
161 # define debug_normal_state_change(state_value) \
162 do { \
163 WARN_ON_ONCE(is_special_task_state(state_value)); \
164 current->task_state_change = _THIS_IP_; \
165 } while (0)
166
167 # define debug_special_state_change(state_value) \
168 do { \
169 WARN_ON_ONCE(!is_special_task_state(state_value)); \
170 current->task_state_change = _THIS_IP_; \
171 } while (0)
172
173 # define debug_rtlock_wait_set_state() \
174 do { \
175 current->saved_state_change = current->task_state_change;\
176 current->task_state_change = _THIS_IP_; \
177 } while (0)
178
179 # define debug_rtlock_wait_restore_state() \
180 do { \
181 current->task_state_change = current->saved_state_change;\
182 } while (0)
183
184 #else
185 # define debug_normal_state_change(cond) do { } while (0)
186 # define debug_special_state_change(cond) do { } while (0)
187 # define debug_rtlock_wait_set_state() do { } while (0)
188 # define debug_rtlock_wait_restore_state() do { } while (0)
189 #endif
190
191 #define trace_set_current_state(state_value) \
192 do { \
193 if (tracepoint_enabled(sched_set_state_tp)) \
194 __trace_set_current_state(state_value); \
195 } while (0)
196
197 /*
198 * set_current_state() includes a barrier so that the write of current->__state
199 * is correctly serialised wrt the caller's subsequent test of whether to
200 * actually sleep:
201 *
202 * for (;;) {
203 * set_current_state(TASK_UNINTERRUPTIBLE);
204 * if (CONDITION)
205 * break;
206 *
207 * schedule();
208 * }
209 * __set_current_state(TASK_RUNNING);
210 *
211 * If the caller does not need such serialisation (because, for instance, the
212 * CONDITION test and condition change and wakeup are under the same lock) then
213 * use __set_current_state().
214 *
215 * The above is typically ordered against the wakeup, which does:
216 *
217 * CONDITION = 1;
218 * wake_up_state(p, TASK_UNINTERRUPTIBLE);
219 *
220 * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
221 * accessing p->__state.
222 *
223 * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
224 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
225 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
226 *
227 * However, with slightly different timing the wakeup TASK_RUNNING store can
228 * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
229 * a problem either because that will result in one extra go around the loop
230 * and our CONDITION test will save the day.
231 *
232 * Also see the comments of try_to_wake_up().
233 */
234 #define __set_current_state(state_value) \
235 do { \
236 debug_normal_state_change((state_value)); \
237 trace_set_current_state(state_value); \
238 WRITE_ONCE(current->__state, (state_value)); \
239 } while (0)
240
241 #define set_current_state(state_value) \
242 do { \
243 debug_normal_state_change((state_value)); \
244 trace_set_current_state(state_value); \
245 smp_store_mb(current->__state, (state_value)); \
246 } while (0)
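/*
 * Illustrative sketch (not kernel code; 'my_cond' is hypothetical): a
 * minimal sleeper/waker pair following the pattern documented above.
 * Real users should normally use wait_event() and friends instead.
 *
 *	static int my_cond;
 *
 *	static void sleeper(void)
 *	{
 *		for (;;) {
 *			set_current_state(TASK_UNINTERRUPTIBLE);
 *			if (READ_ONCE(my_cond))
 *				break;
 *			schedule();
 *		}
 *		__set_current_state(TASK_RUNNING);
 *	}
 *
 *	static void waker(struct task_struct *p)
 *	{
 *		WRITE_ONCE(my_cond, 1);
 *		wake_up_process(p);	// full barrier before reading p->__state
 *	}
 */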
247
248 /*
249 * set_special_state() should be used for those states when the blocking task
250 * can not use the regular condition based wait-loop. In that case we must
251 * serialize against wakeups such that any possible in-flight TASK_RUNNING
252 * stores will not collide with our state change.
253 */
254 #define set_special_state(state_value) \
255 do { \
256 unsigned long flags; /* may shadow */ \
257 \
258 raw_spin_lock_irqsave(&current->pi_lock, flags); \
259 debug_special_state_change((state_value)); \
260 trace_set_current_state(state_value); \
261 WRITE_ONCE(current->__state, (state_value)); \
262 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
263 } while (0)
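/*
 * For illustration, existing users of this pattern: do_signal_stop() uses
 * set_special_state(TASK_STOPPED), kthread_parkme() uses
 * set_special_state(TASK_PARKED), and do_task_dead() uses
 * set_special_state(TASK_DEAD).
 */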
264
265 /*
266 * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
267 *
268 * RT's spin/rwlock substitutions are state preserving. The state of the
269 * task when blocking on the lock is saved in task_struct::saved_state and
270 * restored after the lock has been acquired. These operations are
271 * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
272 * lock related wakeups while the task is blocked on the lock are
273 * redirected to operate on task_struct::saved_state to ensure that these
274 * are not dropped. On restore task_struct::saved_state is set to
275 * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
276 *
277 * The lock operation looks like this:
278 *
279 * current_save_and_set_rtlock_wait_state();
280 * for (;;) {
281 * if (try_lock())
282 * break;
283 * raw_spin_unlock_irq(&lock->wait_lock);
284 * schedule_rtlock();
285 * raw_spin_lock_irq(&lock->wait_lock);
286 * set_current_state(TASK_RTLOCK_WAIT);
287 * }
288 * current_restore_rtlock_saved_state();
289 */
290 #define current_save_and_set_rtlock_wait_state() \
291 do { \
292 lockdep_assert_irqs_disabled(); \
293 raw_spin_lock(&current->pi_lock); \
294 current->saved_state = current->__state; \
295 debug_rtlock_wait_set_state(); \
296 trace_set_current_state(TASK_RTLOCK_WAIT); \
297 WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
298 raw_spin_unlock(&current->pi_lock); \
299 } while (0);
300
301 #define current_restore_rtlock_saved_state() \
302 do { \
303 lockdep_assert_irqs_disabled(); \
304 raw_spin_lock(&current->pi_lock); \
305 debug_rtlock_wait_restore_state(); \
306 trace_set_current_state(current->saved_state); \
307 WRITE_ONCE(current->__state, current->saved_state); \
308 current->saved_state = TASK_RUNNING; \
309 raw_spin_unlock(&current->pi_lock); \
310 } while (0);
311
312 #define get_current_state() READ_ONCE(current->__state)
313
314 /*
315 * Define the task command name length as an enum so that it is visible to
316 * BPF programs.
317 */
318 enum {
319 TASK_COMM_LEN = 16,
320 };
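/*
 * Example (sketch): because TASK_COMM_LEN is an enum it lands in BTF, so a
 * BPF program built against vmlinux.h can size buffers with it:
 *
 *	char comm[TASK_COMM_LEN];
 *	bpf_get_current_comm(comm, sizeof(comm));
 */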
321
322 extern void sched_tick(void);
323
324 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
325
326 extern long schedule_timeout(long timeout);
327 extern long schedule_timeout_interruptible(long timeout);
328 extern long schedule_timeout_killable(long timeout);
329 extern long schedule_timeout_uninterruptible(long timeout);
330 extern long schedule_timeout_idle(long timeout);
331 asmlinkage void schedule(void);
332 extern void schedule_preempt_disabled(void);
333 asmlinkage void preempt_schedule_irq(void);
334 #ifdef CONFIG_PREEMPT_RT
335 extern void schedule_rtlock(void);
336 #endif
337
338 extern int __must_check io_schedule_prepare(void);
339 extern void io_schedule_finish(int token);
340 extern long io_schedule_timeout(long timeout);
341 extern void io_schedule(void);
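/*
 * Example (sketch): sleep for roughly 100ms, waking early on SIGKILL;
 * msecs_to_jiffies() is from <linux/jiffies.h>:
 *
 *	long remaining = schedule_timeout_killable(msecs_to_jiffies(100));
 *
 * Note that bare schedule_timeout() requires the caller to have set a
 * non-running task state first; the _interruptible/_killable/
 * _uninterruptible/_idle wrappers set the corresponding state themselves.
 */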
342
343 /* wrapper function to trace from this header file */
344 DECLARE_TRACEPOINT(sched_set_state_tp);
345 extern void __trace_set_current_state(int state_value);
346
347 /**
348 * struct prev_cputime - snapshot of system and user cputime
349 * @utime: time spent in user mode
350 * @stime: time spent in system mode
351 * @lock: protects the above two fields
352 *
353 * Stores previous user/system time values such that we can guarantee
354 * monotonicity.
355 */
356 struct prev_cputime {
357 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
358 u64 utime;
359 u64 stime;
360 raw_spinlock_t lock;
361 #endif
362 };
363
364 enum vtime_state {
365 /* Task is sleeping or running in a CPU with VTIME inactive: */
366 VTIME_INACTIVE = 0,
367 /* Task is idle */
368 VTIME_IDLE,
369 /* Task runs in kernelspace in a CPU with VTIME active: */
370 VTIME_SYS,
371 /* Task runs in userspace in a CPU with VTIME active: */
372 VTIME_USER,
373 /* Task runs as guests in a CPU with VTIME active: */
374 VTIME_GUEST,
375 };
376
377 struct vtime {
378 seqcount_t seqcount;
379 unsigned long long starttime;
380 enum vtime_state state;
381 unsigned int cpu;
382 u64 utime;
383 u64 stime;
384 u64 gtime;
385 };
386
387 /*
388 * Utilization clamp constraints.
389 * @UCLAMP_MIN: Minimum utilization
390 * @UCLAMP_MAX: Maximum utilization
391 * @UCLAMP_CNT: Utilization clamp constraints count
392 */
393 enum uclamp_id {
394 UCLAMP_MIN = 0,
395 UCLAMP_MAX,
396 UCLAMP_CNT
397 };
398
399 extern struct root_domain def_root_domain;
400 extern struct mutex sched_domains_mutex;
401 extern void sched_domains_mutex_lock(void);
402 extern void sched_domains_mutex_unlock(void);
403
404 struct sched_param {
405 int sched_priority;
406 };
407
408 struct sched_info {
409 #ifdef CONFIG_SCHED_INFO
410 /* Cumulative counters: */
411
412 /* # of times we have run on this CPU: */
413 unsigned long pcount;
414
415 /* Time spent waiting on a runqueue: */
416 unsigned long long run_delay;
417
418 /* Max time spent waiting on a runqueue: */
419 unsigned long long max_run_delay;
420
421 /* Min time spent waiting on a runqueue: */
422 unsigned long long min_run_delay;
423
424 /* Timestamps: */
425
426 /* When did we last run on a CPU? */
427 unsigned long long last_arrival;
428
429 /* When were we last queued to run? */
430 unsigned long long last_queued;
431
432 #endif /* CONFIG_SCHED_INFO */
433 };
434
435 /*
436 * Integer metrics need fixed point arithmetic, e.g., sched/fair
437 * has a few: load, load_avg, util_avg, freq, and capacity.
438 *
439 * We define a basic fixed point arithmetic range, and then formalize
440 * all these metrics based on that basic range.
441 */
442 # define SCHED_FIXEDPOINT_SHIFT 10
443 # define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
444
445 /* Increase resolution of cpu_capacity calculations */
446 # define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
447 # define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
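/*
 * Worked example: with SCHED_FIXEDPOINT_SHIFT == 10, the real value 0.5
 * is encoded as 0.5 * 1024 = 512, and scaling a load by that 50% factor
 * becomes:
 *
 *	scaled = (load * 512) >> SCHED_FIXEDPOINT_SHIFT;
 */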
448
449 struct load_weight {
450 unsigned long weight;
451 u32 inv_weight;
452 };
453
454 /*
455 * The load/runnable/util_avg accumulates an infinite geometric series
456 * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
457 *
458 * [load_avg definition]
459 *
460 * load_avg = runnable% * scale_load_down(load)
461 *
462 * [runnable_avg definition]
463 *
464 * runnable_avg = runnable% * SCHED_CAPACITY_SCALE
465 *
466 * [util_avg definition]
467 *
468 * util_avg = running% * SCHED_CAPACITY_SCALE
469 *
470 * where runnable% is the time ratio that a sched_entity is runnable and
471 * running% the time ratio that a sched_entity is running.
472 *
473 * For cfs_rq, they are the aggregated values of all runnable and blocked
474 * sched_entities.
475 *
476 * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
477 * capacity scaling. The scaling is done through the rq_clock_pelt that is used
478 * for computing those signals (see update_rq_clock_pelt())
479 *
480 * N.B., the above ratios (runnable% and running%) themselves are in the
481 * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
482 * to as large a range as necessary. This is for example reflected by
483 * util_avg's SCHED_CAPACITY_SCALE.
484 *
485 * [Overflow issue]
486 *
487 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
488 * with the highest load (=88761), always runnable on a single cfs_rq,
489 * and should not overflow as the number already hits PID_MAX_LIMIT.
490 *
491 * For all other cases (including 32-bit kernels), struct load_weight's
492 * weight will overflow first before we do, because:
493 *
494 * Max(load_avg) <= Max(load.weight)
495 *
496 * Then it is the load_weight's responsibility to consider overflow
497 * issues.
498 */
499 struct sched_avg {
500 u64 last_update_time;
501 u64 load_sum;
502 u64 runnable_sum;
503 u32 util_sum;
504 u32 period_contrib;
505 unsigned long load_avg;
506 unsigned long runnable_avg;
507 unsigned long util_avg;
508 unsigned int util_est;
509 } ____cacheline_aligned;
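/*
 * Worked example (approximate, per the definitions above): a task that is
 * runnable 75% of the time with a nice-0 weight of 1024 converges towards
 * load_avg ~= 768; if it actually runs 50% of the time on a full-capacity
 * CPU, util_avg converges towards ~512 (50% of SCHED_CAPACITY_SCALE).
 */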
510
511 /*
512 * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
513 * updates. When a task is dequeued, its util_est should not be updated if its
514 * util_avg has not been updated in the meantime.
515 * This information is mapped into the MSB bit of util_est at dequeue time.
516 * Since the maximum value of util_est for a task is 1024 (the PELT util_avg
517 * ceiling for a task), it is safe to use the MSB.
518 */
519 #define UTIL_EST_WEIGHT_SHIFT 2
520 #define UTIL_AVG_UNCHANGED 0x80000000
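/*
 * Sketch of how the flag is carried (see util_est_enqueue()/
 * util_est_dequeue() in kernel/sched/fair.c for the real code):
 *
 *	unsigned int enqueued = READ_ONCE(p->se.avg.util_est);
 *	unsigned int value = enqueued & ~UTIL_AVG_UNCHANGED;	// plain estimate
 *	enqueued |= UTIL_AVG_UNCHANGED;		// util_avg unchanged since dequeue
 */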
521
522 struct sched_statistics {
523 #ifdef CONFIG_SCHEDSTATS
524 u64 wait_start;
525 u64 wait_max;
526 u64 wait_count;
527 u64 wait_sum;
528 u64 iowait_count;
529 u64 iowait_sum;
530
531 u64 sleep_start;
532 u64 sleep_max;
533 s64 sum_sleep_runtime;
534
535 u64 block_start;
536 u64 block_max;
537 s64 sum_block_runtime;
538
539 s64 exec_max;
540 u64 slice_max;
541
542 u64 nr_migrations_cold;
543 u64 nr_failed_migrations_affine;
544 u64 nr_failed_migrations_running;
545 u64 nr_failed_migrations_hot;
546 u64 nr_forced_migrations;
547
548 u64 nr_wakeups;
549 u64 nr_wakeups_sync;
550 u64 nr_wakeups_migrate;
551 u64 nr_wakeups_local;
552 u64 nr_wakeups_remote;
553 u64 nr_wakeups_affine;
554 u64 nr_wakeups_affine_attempts;
555 u64 nr_wakeups_passive;
556 u64 nr_wakeups_idle;
557
558 #ifdef CONFIG_SCHED_CORE
559 u64 core_forceidle_sum;
560 #endif
561 #endif /* CONFIG_SCHEDSTATS */
562 } ____cacheline_aligned;
563
564 struct sched_entity {
565 /* For load-balancing: */
566 struct load_weight load;
567 struct rb_node run_node;
568 u64 deadline;
569 u64 min_vruntime;
570 u64 min_slice;
571
572 struct list_head group_node;
573 unsigned char on_rq;
574 unsigned char sched_delayed;
575 unsigned char rel_deadline;
576 unsigned char custom_slice;
577 /* hole */
578
579 u64 exec_start;
580 u64 sum_exec_runtime;
581 u64 prev_sum_exec_runtime;
582 u64 vruntime;
583 union {
584 /*
585 * When !@on_rq this field is vlag.
586 * When cfs_rq->curr == se (which implies @on_rq)
587 * this field is vprot. See protect_slice().
588 */
589 s64 vlag;
590 u64 vprot;
591 };
592 u64 slice;
593
594 u64 nr_migrations;
595
596 #ifdef CONFIG_FAIR_GROUP_SCHED
597 int depth;
598 struct sched_entity *parent;
599 /* rq on which this entity is (to be) queued: */
600 struct cfs_rq *cfs_rq;
601 /* rq "owned" by this entity/group: */
602 struct cfs_rq *my_q;
603 /* cached value of my_q->h_nr_running */
604 unsigned long runnable_weight;
605 #endif
606
607 /*
608 * Per entity load average tracking.
609 *
610 * Put into separate cache line so it does not
611 * collide with read-mostly values above.
612 */
613 struct sched_avg avg;
614 };
615
616 struct sched_rt_entity {
617 struct list_head run_list;
618 unsigned long timeout;
619 unsigned long watchdog_stamp;
620 unsigned int time_slice;
621 unsigned short on_rq;
622 unsigned short on_list;
623
624 struct sched_rt_entity *back;
625 #ifdef CONFIG_RT_GROUP_SCHED
626 struct sched_rt_entity *parent;
627 /* rq on which this entity is (to be) queued: */
628 struct rt_rq *rt_rq;
629 /* rq "owned" by this entity/group: */
630 struct rt_rq *my_q;
631 #endif
632 } __randomize_layout;
633
634 typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
635 typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
636
637 struct sched_dl_entity {
638 struct rb_node rb_node;
639
640 /*
641 * Original scheduling parameters. Copied here from sched_attr
642 * during sched_setattr(), they will remain the same until
643 * the next sched_setattr().
644 */
645 u64 dl_runtime; /* Maximum runtime for each instance */
646 u64 dl_deadline; /* Relative deadline of each instance */
647 u64 dl_period; /* Separation of two instances (period) */
648 u64 dl_bw; /* dl_runtime / dl_period */
649 u64 dl_density; /* dl_runtime / dl_deadline */
650
651 /*
652 * Actual scheduling parameters. Initialized with the values above,
653 * they are continuously updated during task execution. Note that
654 * the remaining runtime could be < 0 in case we are in overrun.
655 */
656 s64 runtime; /* Remaining runtime for this instance */
657 u64 deadline; /* Absolute deadline for this instance */
658 unsigned int flags; /* Specifying the scheduler behaviour */
659
660 /*
661 * Some bool flags:
662 *
663 * @dl_throttled tells if we exhausted the runtime. If so, the
664 * task has to wait for a replenishment to be performed at the
665 * next firing of dl_timer.
666 *
667 * @dl_yielded tells if task gave up the CPU before consuming
668 * all its available runtime during the last job.
669 *
670 * @dl_non_contending tells if the task is inactive while still
671 * contributing to the active utilization. In other words, it
672 * indicates if the inactive timer has been armed and its handler
673 * has not been executed yet. This flag is useful to avoid race
674 * conditions between the inactive timer handler and the wakeup
675 * code.
676 *
677 * @dl_overrun tells if the task asked to be informed about runtime
678 * overruns.
679 *
680 * @dl_server tells if this is a server entity.
681 *
682 * @dl_defer tells if this is a deferred or regular server. For
683 * now, only the deferred server exists.
684 *
685 * @dl_defer_armed tells if the deferrable server is waiting
686 * for the replenishment timer to activate it.
687 *
688 * @dl_server_active tells if the dlserver is active (started).
689 * The dlserver is started on the first cfs enqueue on an idle runqueue
690 * and is stopped when a dequeue results in 0 cfs tasks on the
691 * runqueue. In other words, the dlserver is active only when the CPU's
692 * runqueue has at least one cfs task.
693 *
694 * @dl_defer_running tells if the deferrable server is actually
695 * running, skipping the defer phase.
696 */
697 unsigned int dl_throttled : 1;
698 unsigned int dl_yielded : 1;
699 unsigned int dl_non_contending : 1;
700 unsigned int dl_overrun : 1;
701 unsigned int dl_server : 1;
702 unsigned int dl_server_active : 1;
703 unsigned int dl_defer : 1;
704 unsigned int dl_defer_armed : 1;
705 unsigned int dl_defer_running : 1;
706 unsigned int dl_server_idle : 1;
707
708 /*
709 * Bandwidth enforcement timer. Each -deadline task has its
710 * own bandwidth to be enforced, thus we need one timer per task.
711 */
712 struct hrtimer dl_timer;
713
714 /*
715 * Inactive timer, responsible for decreasing the active utilization
716 * at the "0-lag time". When a -deadline task blocks, it contributes
717 * to GRUB's active utilization until the "0-lag time", hence a
718 * timer is needed to decrease the active utilization at the correct
719 * time.
720 */
721 struct hrtimer inactive_timer;
722
723 /*
724 * Bits for DL-server functionality. Also see the comment near
725 * dl_server_update().
726 *
727 * @rq the runqueue this server is for
728 *
729 * @server_has_tasks() returns true if @server_pick_task() would
730 * return a runnable task.
731 */
732 struct rq *rq;
733 dl_server_has_tasks_f server_has_tasks;
734 dl_server_pick_f server_pick_task;
735
736 #ifdef CONFIG_RT_MUTEXES
737 /*
738 * Priority Inheritance. When a DEADLINE scheduling entity is boosted
739 * pi_se points to the donor, otherwise points to the dl_se it belongs
740 * to (the original one/itself).
741 */
742 struct sched_dl_entity *pi_se;
743 #endif
744 };
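/*
 * Illustrative user-space sketch: the dl_runtime/dl_deadline/dl_period
 * values above come from sched_setattr(2), e.g. reserving 10ms of runtime
 * every 100ms (all values in nanoseconds):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,
 *		.sched_deadline	= 100 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *	sched_setattr(pid, &attr, 0);
 */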
745
746 #ifdef CONFIG_UCLAMP_TASK
747 /* Number of utilization clamp buckets (shorter alias) */
748 #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
749
750 /*
751 * Utilization clamp for a scheduling entity
752 * @value: clamp value "assigned" to a se
753 * @bucket_id: bucket index corresponding to the "assigned" value
754 * @active: the se is currently refcounted in a rq's bucket
755 * @user_defined: the requested clamp value comes from user-space
756 *
757 * The bucket_id is the index of the clamp bucket matching the clamp value
758 * which is pre-computed and stored to avoid expensive integer divisions from
759 * the fast path.
760 *
761 * The active bit is set whenever a task has an "effective" value assigned,
762 * which can be different from the clamp value "requested" from user-space.
763 * This tells us the task is refcounted in the rq's bucket corresponding
764 * to the "effective" bucket_id.
765 *
766 * The user_defined bit is set whenever a task has a task-specific clamp
767 * value requested from userspace, i.e. the system defaults apply to this task
768 * just as a restriction. This makes it possible to relax default clamps when
769 * a less restrictive task-specific value has been requested, thus allowing
770 * a "nice" semantic to be implemented. For example, a task running with a 20%
771 * default boost can still drop its own boosting to 0%.
772 */
773 struct uclamp_se {
774 unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
775 unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
776 unsigned int active : 1;
777 unsigned int user_defined : 1;
778 };
779 #endif /* CONFIG_UCLAMP_TASK */
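/*
 * Illustrative user-space sketch: a task-specific clamp is requested via
 * sched_setattr(2), e.g. asking for at least 25% of capacity:
 *
 *	attr.sched_flags = SCHED_FLAG_UTIL_CLAMP_MIN;
 *	attr.sched_util_min = 256;	// 25% of SCHED_CAPACITY_SCALE (1024)
 */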
780
781 union rcu_special {
782 struct {
783 u8 blocked;
784 u8 need_qs;
785 u8 exp_hint; /* Hint for performance. */
786 u8 need_mb; /* Readers need smp_mb(). */
787 } b; /* Bits. */
788 u32 s; /* Set of bits. */
789 };
790
791 enum perf_event_task_context {
792 perf_invalid_context = -1,
793 perf_hw_context = 0,
794 perf_sw_context,
795 perf_nr_task_contexts,
796 };
797
798 /*
799 * Number of contexts where an event can trigger:
800 * task, softirq, hardirq, nmi.
801 */
802 #define PERF_NR_CONTEXTS 4
803
804 struct wake_q_node {
805 struct wake_q_node *next;
806 };
807
808 struct kmap_ctrl {
809 #ifdef CONFIG_KMAP_LOCAL
810 int idx;
811 pte_t pteval[KM_MAX_IDX];
812 #endif
813 };
814
815 struct task_struct {
816 #ifdef CONFIG_THREAD_INFO_IN_TASK
817 /*
818 * For reasons of header soup (see current_thread_info()), this
819 * must be the first element of task_struct.
820 */
821 struct thread_info thread_info;
822 #endif
823 unsigned int __state;
824
825 /* saved state for "spinlock sleepers" */
826 unsigned int saved_state;
827
828 /*
829 * This begins the randomizable portion of task_struct. Only
830 * scheduling-critical items should be added above here.
831 */
832 randomized_struct_fields_start
833
834 void *stack;
835 refcount_t usage;
836 /* Per task flags (PF_*), defined further below: */
837 unsigned int flags;
838 unsigned int ptrace;
839
840 #ifdef CONFIG_MEM_ALLOC_PROFILING
841 struct alloc_tag *alloc_tag;
842 #endif
843
844 int on_cpu;
845 struct __call_single_node wake_entry;
846 unsigned int wakee_flips;
847 unsigned long wakee_flip_decay_ts;
848 struct task_struct *last_wakee;
849
850 /*
851 * recent_used_cpu is initially set to the last CPU used by a task
852 * that does an affine wakeup of another task. Waker/wakee relationships
853 * can push tasks around a set of CPUs where each wakeup moves to the
854 * next one. Tracking a recently used CPU allows a quick search for a
855 * recently used CPU that may be idle.
856 */
857 int recent_used_cpu;
858 int wake_cpu;
859 int on_rq;
860
861 int prio;
862 int static_prio;
863 int normal_prio;
864 unsigned int rt_priority;
865
866 struct sched_entity se;
867 struct sched_rt_entity rt;
868 struct sched_dl_entity dl;
869 struct sched_dl_entity *dl_server;
870 #ifdef CONFIG_SCHED_CLASS_EXT
871 struct sched_ext_entity scx;
872 #endif
873 const struct sched_class *sched_class;
874
875 #ifdef CONFIG_SCHED_CORE
876 struct rb_node core_node;
877 unsigned long core_cookie;
878 unsigned int core_occupation;
879 #endif
880
881 #ifdef CONFIG_CGROUP_SCHED
882 struct task_group *sched_task_group;
883 #endif
884
885
886 #ifdef CONFIG_UCLAMP_TASK
887 /*
888 * Clamp values requested for a scheduling entity.
889 * Must be updated with task_rq_lock() held.
890 */
891 struct uclamp_se uclamp_req[UCLAMP_CNT];
892 /*
893 * Effective clamp values used for a scheduling entity.
894 * Must be updated with task_rq_lock() held.
895 */
896 struct uclamp_se uclamp[UCLAMP_CNT];
897 #endif
898
899 struct sched_statistics stats;
900
901 #ifdef CONFIG_PREEMPT_NOTIFIERS
902 /* List of struct preempt_notifier: */
903 struct hlist_head preempt_notifiers;
904 #endif
905
906 #ifdef CONFIG_BLK_DEV_IO_TRACE
907 unsigned int btrace_seq;
908 #endif
909
910 unsigned int policy;
911 unsigned long max_allowed_capacity;
912 int nr_cpus_allowed;
913 const cpumask_t *cpus_ptr;
914 cpumask_t *user_cpus_ptr;
915 cpumask_t cpus_mask;
916 void *migration_pending;
917 unsigned short migration_disabled;
918 unsigned short migration_flags;
919
920 #ifdef CONFIG_PREEMPT_RCU
921 int rcu_read_lock_nesting;
922 union rcu_special rcu_read_unlock_special;
923 struct list_head rcu_node_entry;
924 struct rcu_node *rcu_blocked_node;
925 #endif /* #ifdef CONFIG_PREEMPT_RCU */
926
927 #ifdef CONFIG_TASKS_RCU
928 unsigned long rcu_tasks_nvcsw;
929 u8 rcu_tasks_holdout;
930 u8 rcu_tasks_idx;
931 int rcu_tasks_idle_cpu;
932 struct list_head rcu_tasks_holdout_list;
933 int rcu_tasks_exit_cpu;
934 struct list_head rcu_tasks_exit_list;
935 #endif /* #ifdef CONFIG_TASKS_RCU */
936
937 #ifdef CONFIG_TASKS_TRACE_RCU
938 int trc_reader_nesting;
939 int trc_ipi_to_cpu;
940 union rcu_special trc_reader_special;
941 struct list_head trc_holdout_list;
942 struct list_head trc_blkd_node;
943 int trc_blkd_cpu;
944 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
945
946 struct sched_info sched_info;
947
948 struct list_head tasks;
949 struct plist_node pushable_tasks;
950 struct rb_node pushable_dl_tasks;
951
952 struct mm_struct *mm;
953 struct mm_struct *active_mm;
954 struct address_space *faults_disabled_mapping;
955
956 int exit_state;
957 int exit_code;
958 int exit_signal;
959 /* The signal sent when the parent dies: */
960 int pdeath_signal;
961 /* JOBCTL_*, siglock protected: */
962 unsigned long jobctl;
963
964 /* Used for emulating ABI behavior of previous Linux versions: */
965 unsigned int personality;
966
967 /* Scheduler bits, serialized by scheduler locks: */
968 unsigned sched_reset_on_fork:1;
969 unsigned sched_contributes_to_load:1;
970 unsigned sched_migrated:1;
971 unsigned sched_task_hot:1;
972
973 /* Force alignment to the next boundary: */
974 unsigned :0;
975
976 /* Unserialized, strictly 'current' */
977
978 /*
979 * This field must not be in the scheduler word above due to wakelist
980 * queueing no longer being serialized by p->on_cpu. However:
981 *
982 * p->XXX = X; ttwu()
983 * schedule() if (p->on_rq && ..) // false
984 * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true
985 * deactivate_task() ttwu_queue_wakelist())
986 * p->on_rq = 0; p->sched_remote_wakeup = Y;
987 *
988 * guarantees all stores of 'current' are visible before
989 * ->sched_remote_wakeup gets used, so it can be in this word.
990 */
991 unsigned sched_remote_wakeup:1;
992 #ifdef CONFIG_RT_MUTEXES
993 unsigned sched_rt_mutex:1;
994 #endif
995
996 /* Bit to tell TOMOYO we're in execve(): */
997 unsigned in_execve:1;
998 unsigned in_iowait:1;
999 #ifndef TIF_RESTORE_SIGMASK
1000 unsigned restore_sigmask:1;
1001 #endif
1002 #ifdef CONFIG_MEMCG_V1
1003 unsigned in_user_fault:1;
1004 #endif
1005 #ifdef CONFIG_LRU_GEN
1006 /* whether the LRU algorithm may apply to this access */
1007 unsigned in_lru_fault:1;
1008 #endif
1009 #ifdef CONFIG_COMPAT_BRK
1010 unsigned brk_randomized:1;
1011 #endif
1012 #ifdef CONFIG_CGROUPS
1013 /* disallow userland-initiated cgroup migration */
1014 unsigned no_cgroup_migration:1;
1015 /* task is frozen/stopped (used by the cgroup freezer) */
1016 unsigned frozen:1;
1017 #endif
1018 #ifdef CONFIG_BLK_CGROUP
1019 unsigned use_memdelay:1;
1020 #endif
1021 #ifdef CONFIG_PSI
1022 /* Stalled due to lack of memory */
1023 unsigned in_memstall:1;
1024 #endif
1025 #ifdef CONFIG_PAGE_OWNER
1026 /* Used by page_owner=on to detect recursion in page tracking. */
1027 unsigned in_page_owner:1;
1028 #endif
1029 #ifdef CONFIG_EVENTFD
1030 /* Recursion prevention for eventfd_signal() */
1031 unsigned in_eventfd:1;
1032 #endif
1033 #ifdef CONFIG_ARCH_HAS_CPU_PASID
1034 unsigned pasid_activated:1;
1035 #endif
1036 #ifdef CONFIG_X86_BUS_LOCK_DETECT
1037 unsigned reported_split_lock:1;
1038 #endif
1039 #ifdef CONFIG_TASK_DELAY_ACCT
1040 /* delay due to memory thrashing */
1041 unsigned in_thrashing:1;
1042 #endif
1043 unsigned in_nf_duplicate:1;
1044 #ifdef CONFIG_PREEMPT_RT
1045 struct netdev_xmit net_xmit;
1046 #endif
1047 unsigned long atomic_flags; /* Flags requiring atomic access. */
1048
1049 struct restart_block restart_block;
1050
1051 pid_t pid;
1052 pid_t tgid;
1053
1054 #ifdef CONFIG_STACKPROTECTOR
1055 /* Canary value for the -fstack-protector GCC feature: */
1056 unsigned long stack_canary;
1057 #endif
1058 /*
1059 * Pointers to the (original) parent process, youngest child, younger sibling,
1060 * older sibling, respectively. (p->father can be replaced with
1061 * p->real_parent->pid)
1062 */
1063
1064 /* Real parent process: */
1065 struct task_struct __rcu *real_parent;
1066
1067 /* Recipient of SIGCHLD, wait4() reports: */
1068 struct task_struct __rcu *parent;
1069
1070 /*
1071 * Children/sibling form the list of natural children:
1072 */
1073 struct list_head children;
1074 struct list_head sibling;
1075 struct task_struct *group_leader;
1076
1077 /*
1078 * 'ptraced' is the list of tasks this task is using ptrace() on.
1079 *
1080 * This includes both natural children and PTRACE_ATTACH targets.
1081 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
1082 */
1083 struct list_head ptraced;
1084 struct list_head ptrace_entry;
1085
1086 /* PID/PID hash table linkage. */
1087 struct pid *thread_pid;
1088 struct hlist_node pid_links[PIDTYPE_MAX];
1089 struct list_head thread_node;
1090
1091 struct completion *vfork_done;
1092
1093 /* CLONE_CHILD_SETTID: */
1094 int __user *set_child_tid;
1095
1096 /* CLONE_CHILD_CLEARTID: */
1097 int __user *clear_child_tid;
1098
1099 /* PF_KTHREAD | PF_IO_WORKER */
1100 void *worker_private;
1101
1102 u64 utime;
1103 u64 stime;
1104 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1105 u64 utimescaled;
1106 u64 stimescaled;
1107 #endif
1108 u64 gtime;
1109 struct prev_cputime prev_cputime;
1110 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1111 struct vtime vtime;
1112 #endif
1113
1114 #ifdef CONFIG_NO_HZ_FULL
1115 atomic_t tick_dep_mask;
1116 #endif
1117 /* Context switch counts: */
1118 unsigned long nvcsw;
1119 unsigned long nivcsw;
1120
1121 /* Monotonic time in nsecs: */
1122 u64 start_time;
1123
1124 /* Boot based time in nsecs: */
1125 u64 start_boottime;
1126
1127 /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
1128 unsigned long min_flt;
1129 unsigned long maj_flt;
1130
1131 /* Empty if CONFIG_POSIX_CPUTIMERS=n */
1132 struct posix_cputimers posix_cputimers;
1133
1134 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1135 struct posix_cputimers_work posix_cputimers_work;
1136 #endif
1137
1138 /* Process credentials: */
1139
1140 /* Tracer's credentials at attach: */
1141 const struct cred __rcu *ptracer_cred;
1142
1143 /* Objective and real subjective task credentials (COW): */
1144 const struct cred __rcu *real_cred;
1145
1146 /* Effective (overridable) subjective task credentials (COW): */
1147 const struct cred __rcu *cred;
1148
1149 #ifdef CONFIG_KEYS
1150 /* Cached requested key. */
1151 struct key *cached_requested_key;
1152 #endif
1153
1154 /*
1155 * executable name, excluding path.
1156 *
1157 * - normally initialized in begin_new_exec()
1158 * - set it with set_task_comm()
1159 * - strscpy_pad() to ensure it is always NUL-terminated and
1160 * zero-padded
1161 * - task_lock() to ensure the operation is atomic and the name is
1162 * fully updated.
1163 */
1164 char comm[TASK_COMM_LEN];
1165
1166 struct nameidata *nameidata;
1167
1168 #ifdef CONFIG_SYSVIPC
1169 struct sysv_sem sysvsem;
1170 struct sysv_shm sysvshm;
1171 #endif
1172 #ifdef CONFIG_DETECT_HUNG_TASK
1173 unsigned long last_switch_count;
1174 unsigned long last_switch_time;
1175 #endif
1176 /* Filesystem information: */
1177 struct fs_struct *fs;
1178
1179 /* Open file information: */
1180 struct files_struct *files;
1181
1182 #ifdef CONFIG_IO_URING
1183 struct io_uring_task *io_uring;
1184 #endif
1185
1186 /* Namespaces: */
1187 struct nsproxy *nsproxy;
1188
1189 /* Signal handlers: */
1190 struct signal_struct *signal;
1191 struct sighand_struct __rcu *sighand;
1192 sigset_t blocked;
1193 sigset_t real_blocked;
1194 /* Restored if set_restore_sigmask() was used: */
1195 sigset_t saved_sigmask;
1196 struct sigpending pending;
1197 unsigned long sas_ss_sp;
1198 size_t sas_ss_size;
1199 unsigned int sas_ss_flags;
1200
1201 struct callback_head *task_works;
1202
1203 #ifdef CONFIG_AUDIT
1204 #ifdef CONFIG_AUDITSYSCALL
1205 struct audit_context *audit_context;
1206 #endif
1207 kuid_t loginuid;
1208 unsigned int sessionid;
1209 #endif
1210 struct seccomp seccomp;
1211 struct syscall_user_dispatch syscall_dispatch;
1212
1213 /* Thread group tracking: */
1214 u64 parent_exec_id;
1215 u64 self_exec_id;
1216
1217 /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
1218 spinlock_t alloc_lock;
1219
1220 /* Protection of the PI data structures: */
1221 raw_spinlock_t pi_lock;
1222
1223 struct wake_q_node wake_q;
1224
1225 #ifdef CONFIG_RT_MUTEXES
1226 /* PI waiters blocked on a rt_mutex held by this task: */
1227 struct rb_root_cached pi_waiters;
1228 /* Updated under owner's pi_lock and rq lock */
1229 struct task_struct *pi_top_task;
1230 /* Deadlock detection and priority inheritance handling: */
1231 struct rt_mutex_waiter *pi_blocked_on;
1232 #endif
1233
1234 struct mutex *blocked_on; /* lock we're blocked on */
1235
1236 #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
1237 /*
1238 * Encoded lock address causing task block (lower 2 bits = type from
1239 * <linux/hung_task.h>). Accessed via hung_task_*() helpers.
1240 */
1241 unsigned long blocker;
1242 #endif
1243
1244 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1245 int non_block_count;
1246 #endif
1247
1248 #ifdef CONFIG_TRACE_IRQFLAGS
1249 struct irqtrace_events irqtrace;
1250 unsigned int hardirq_threaded;
1251 u64 hardirq_chain_key;
1252 int softirqs_enabled;
1253 int softirq_context;
1254 int irq_config;
1255 #endif
1256 #ifdef CONFIG_PREEMPT_RT
1257 int softirq_disable_cnt;
1258 #endif
1259
1260 #ifdef CONFIG_LOCKDEP
1261 # define MAX_LOCK_DEPTH 48UL
1262 u64 curr_chain_key;
1263 int lockdep_depth;
1264 unsigned int lockdep_recursion;
1265 struct held_lock held_locks[MAX_LOCK_DEPTH];
1266 #endif
1267
1268 #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1269 unsigned int in_ubsan;
1270 #endif
1271
1272 /* Journalling filesystem info: */
1273 void *journal_info;
1274
1275 /* Stacked block device info: */
1276 struct bio_list *bio_list;
1277
1278 /* Stack plugging: */
1279 struct blk_plug *plug;
1280
1281 /* VM state: */
1282 struct reclaim_state *reclaim_state;
1283
1284 struct io_context *io_context;
1285
1286 #ifdef CONFIG_COMPACTION
1287 struct capture_control *capture_control;
1288 #endif
1289 /* Ptrace state: */
1290 unsigned long ptrace_message;
1291 kernel_siginfo_t *last_siginfo;
1292
1293 struct task_io_accounting ioac;
1294 #ifdef CONFIG_PSI
1295 /* Pressure stall state */
1296 unsigned int psi_flags;
1297 #endif
1298 #ifdef CONFIG_TASK_XACCT
1299 /* Accumulated RSS usage: */
1300 u64 acct_rss_mem1;
1301 /* Accumulated virtual memory usage: */
1302 u64 acct_vm_mem1;
1303 /* stime + utime since last update: */
1304 u64 acct_timexpd;
1305 #endif
1306 #ifdef CONFIG_CPUSETS
1307 /* Protected by ->alloc_lock: */
1308 nodemask_t mems_allowed;
1309 /* Sequence number to catch updates: */
1310 seqcount_spinlock_t mems_allowed_seq;
1311 int cpuset_mem_spread_rotor;
1312 #endif
1313 #ifdef CONFIG_CGROUPS
1314 /* Control Group info protected by css_set_lock: */
1315 struct css_set __rcu *cgroups;
1316 /* cg_list protected by css_set_lock and tsk->alloc_lock: */
1317 struct list_head cg_list;
1318 #endif
1319 #ifdef CONFIG_X86_CPU_RESCTRL
1320 u32 closid;
1321 u32 rmid;
1322 #endif
1323 #ifdef CONFIG_FUTEX
1324 struct robust_list_head __user *robust_list;
1325 #ifdef CONFIG_COMPAT
1326 struct compat_robust_list_head __user *compat_robust_list;
1327 #endif
1328 struct list_head pi_state_list;
1329 struct futex_pi_state *pi_state_cache;
1330 struct mutex futex_exit_mutex;
1331 unsigned int futex_state;
1332 #endif
1333 #ifdef CONFIG_PERF_EVENTS
1334 u8 perf_recursion[PERF_NR_CONTEXTS];
1335 struct perf_event_context *perf_event_ctxp;
1336 struct mutex perf_event_mutex;
1337 struct list_head perf_event_list;
1338 struct perf_ctx_data __rcu *perf_ctx_data;
1339 #endif
1340 #ifdef CONFIG_DEBUG_PREEMPT
1341 unsigned long preempt_disable_ip;
1342 #endif
1343 #ifdef CONFIG_NUMA
1344 /* Protected by alloc_lock: */
1345 struct mempolicy *mempolicy;
1346 short il_prev;
1347 u8 il_weight;
1348 short pref_node_fork;
1349 #endif
1350 #ifdef CONFIG_NUMA_BALANCING
1351 int numa_scan_seq;
1352 unsigned int numa_scan_period;
1353 unsigned int numa_scan_period_max;
1354 int numa_preferred_nid;
1355 unsigned long numa_migrate_retry;
1356 /* Migration stamp: */
1357 u64 node_stamp;
1358 u64 last_task_numa_placement;
1359 u64 last_sum_exec_runtime;
1360 struct callback_head numa_work;
1361
1362 /*
1363 * This pointer is only modified for current in syscall and
1364 * pagefault context (and for tasks being destroyed), so it can be read
1365 * from any of the following contexts:
1366 * - RCU read-side critical section
1367 * - current->numa_group from everywhere
1368 * - task's runqueue locked, task not running
1369 */
1370 struct numa_group __rcu *numa_group;
1371
1372 /*
1373 * numa_faults is an array split into four regions:
1374 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1375 * in this precise order.
1376 *
1377 * faults_memory: Exponential decaying average of faults on a per-node
1378 * basis. Scheduling placement decisions are made based on these
1379 * counts. The values remain static for the duration of a PTE scan.
1380 * faults_cpu: Track the nodes the process was running on when a NUMA
1381 * hinting fault was incurred.
1382 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1383 * during the current scan window. When the scan completes, the counts
1384 * in faults_memory and faults_cpu decay and these values are copied.
1385 */
1386 unsigned long *numa_faults;
1387 unsigned long total_numa_faults;
1388
1389 /*
1390 * numa_faults_locality tracks if faults recorded during the last
1391 * scan window were remote/local or failed to migrate. The task scan
1392 * period is adapted based on the locality of the faults with different
1393 * weights depending on whether they were shared or private faults
1394 */
1395 unsigned long numa_faults_locality[3];
1396
1397 unsigned long numa_pages_migrated;
1398 #endif /* CONFIG_NUMA_BALANCING */
1399
1400 #ifdef CONFIG_RSEQ
1401 struct rseq __user *rseq;
1402 u32 rseq_len;
1403 u32 rseq_sig;
1404 /*
1405 * RmW on rseq_event_mask must be performed atomically
1406 * with respect to preemption.
1407 */
1408 unsigned long rseq_event_mask;
1409 # ifdef CONFIG_DEBUG_RSEQ
1410 /*
1411 * This is a place holder to save a copy of the rseq fields for
1412 * validation of read-only fields. The struct rseq has a
1413 * variable-length array at the end, so it cannot be used
1414 * directly. Reserve a size large enough for the known fields.
1415 */
1416 char rseq_fields[sizeof(struct rseq)];
1417 # endif
1418 #endif
1419
1420 #ifdef CONFIG_SCHED_MM_CID
1421 int mm_cid; /* Current cid in mm */
1422 int last_mm_cid; /* Most recent cid in mm */
1423 int migrate_from_cpu;
1424 int mm_cid_active; /* Whether cid bitmap is active */
1425 struct callback_head cid_work;
1426 #endif
1427
1428 struct tlbflush_unmap_batch tlb_ubc;
1429
1430 /* Cache last used pipe for splice(): */
1431 struct pipe_inode_info *splice_pipe;
1432
1433 struct page_frag task_frag;
1434
1435 #ifdef CONFIG_TASK_DELAY_ACCT
1436 struct task_delay_info *delays;
1437 #endif
1438
1439 #ifdef CONFIG_FAULT_INJECTION
1440 int make_it_fail;
1441 unsigned int fail_nth;
1442 #endif
1443 /*
1444 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
1445 * balance_dirty_pages() for a dirty throttling pause:
1446 */
1447 int nr_dirtied;
1448 int nr_dirtied_pause;
1449 /* Start of a write-and-pause period: */
1450 unsigned long dirty_paused_when;
1451
1452 #ifdef CONFIG_LATENCYTOP
1453 int latency_record_count;
1454 struct latency_record latency_record[LT_SAVECOUNT];
1455 #endif
1456 /*
1457 * Time slack values; these are used to round up poll() and
1458 * select() etc timeout values. These are in nanoseconds.
1459 */
1460 u64 timer_slack_ns;
1461 u64 default_timer_slack_ns;
1462
1463 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
1464 unsigned int kasan_depth;
1465 #endif
1466
1467 #ifdef CONFIG_KCSAN
1468 struct kcsan_ctx kcsan_ctx;
1469 #ifdef CONFIG_TRACE_IRQFLAGS
1470 struct irqtrace_events kcsan_save_irqtrace;
1471 #endif
1472 #ifdef CONFIG_KCSAN_WEAK_MEMORY
1473 int kcsan_stack_depth;
1474 #endif
1475 #endif
1476
1477 #ifdef CONFIG_KMSAN
1478 struct kmsan_ctx kmsan_ctx;
1479 #endif
1480
1481 #if IS_ENABLED(CONFIG_KUNIT)
1482 struct kunit *kunit_test;
1483 #endif
1484
1485 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1486 /* Index of current stored address in ret_stack: */
1487 int curr_ret_stack;
1488 int curr_ret_depth;
1489
1490 /* Stack of return addresses for return function tracing: */
1491 unsigned long *ret_stack;
1492
1493 /* Timestamp for last schedule: */
1494 unsigned long long ftrace_timestamp;
1495 unsigned long long ftrace_sleeptime;
1496
1497 /*
1498 * Number of functions that haven't been traced
1499 * because of depth overrun:
1500 */
1501 atomic_t trace_overrun;
1502
1503 /* Pause tracing: */
1504 atomic_t tracing_graph_pause;
1505 #endif
1506
1507 #ifdef CONFIG_TRACING
1508 /* Bitmask and counter of trace recursion: */
1509 unsigned long trace_recursion;
1510 #endif /* CONFIG_TRACING */
1511
1512 #ifdef CONFIG_KCOV
1513 /* See kernel/kcov.c for more details. */
1514
1515 /* Coverage collection mode enabled for this task (0 if disabled): */
1516 unsigned int kcov_mode;
1517
1518 /* Size of the kcov_area: */
1519 unsigned int kcov_size;
1520
1521 /* Buffer for coverage collection: */
1522 void *kcov_area;
1523
1524 /* KCOV descriptor wired with this task or NULL: */
1525 struct kcov *kcov;
1526
1527 /* KCOV common handle for remote coverage collection: */
1528 u64 kcov_handle;
1529
1530 /* KCOV sequence number: */
1531 int kcov_sequence;
1532
1533 /* Collect coverage from softirq context: */
1534 unsigned int kcov_softirq;
1535 #endif
1536
1537 #ifdef CONFIG_MEMCG_V1
1538 struct mem_cgroup *memcg_in_oom;
1539 #endif
1540
1541 #ifdef CONFIG_MEMCG
1542 /* Number of pages to reclaim on returning to userland: */
1543 unsigned int memcg_nr_pages_over_high;
1544
1545 /* Used by memcontrol for targeted memcg charge: */
1546 struct mem_cgroup *active_memcg;
1547
1548 /* Cache for current->cgroups->memcg->objcg lookups: */
1549 struct obj_cgroup *objcg;
1550 #endif
1551
1552 #ifdef CONFIG_BLK_CGROUP
1553 struct gendisk *throttle_disk;
1554 #endif
1555
1556 #ifdef CONFIG_UPROBES
1557 struct uprobe_task *utask;
1558 #endif
1559 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1560 unsigned int sequential_io;
1561 unsigned int sequential_io_avg;
1562 #endif
1563 struct kmap_ctrl kmap_ctrl;
1564 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1565 unsigned long task_state_change;
1566 # ifdef CONFIG_PREEMPT_RT
1567 unsigned long saved_state_change;
1568 # endif
1569 #endif
1570 struct rcu_head rcu;
1571 refcount_t rcu_users;
1572 int pagefault_disabled;
1573 #ifdef CONFIG_MMU
1574 struct task_struct *oom_reaper_list;
1575 struct timer_list oom_reaper_timer;
1576 #endif
1577 #ifdef CONFIG_VMAP_STACK
1578 struct vm_struct *stack_vm_area;
1579 #endif
1580 #ifdef CONFIG_THREAD_INFO_IN_TASK
1581 /* A live task holds one reference: */
1582 refcount_t stack_refcount;
1583 #endif
1584 #ifdef CONFIG_LIVEPATCH
1585 int patch_state;
1586 #endif
1587 #ifdef CONFIG_SECURITY
1588 /* Used by LSM modules for access restriction: */
1589 void *security;
1590 #endif
1591 #ifdef CONFIG_BPF_SYSCALL
1592 /* Used by BPF task local storage */
1593 struct bpf_local_storage __rcu *bpf_storage;
1594 /* Used for BPF run context */
1595 struct bpf_run_ctx *bpf_ctx;
1596 #endif
1597 /* Used by BPF for per-TASK xdp storage */
1598 struct bpf_net_context *bpf_net_context;
1599
1600 #ifdef CONFIG_KSTACK_ERASE
1601 unsigned long lowest_stack;
1602 #endif
1603 #ifdef CONFIG_KSTACK_ERASE_METRICS
1604 unsigned long prev_lowest_stack;
1605 #endif
1606
1607 #ifdef CONFIG_X86_MCE
1608 void __user *mce_vaddr;
1609 __u64 mce_kflags;
1610 u64 mce_addr;
1611 __u64 mce_ripv : 1,
1612 mce_whole_page : 1,
1613 __mce_reserved : 62;
1614 struct callback_head mce_kill_me;
1615 int mce_count;
1616 #endif
1617
1618 #ifdef CONFIG_KRETPROBES
1619 struct llist_head kretprobe_instances;
1620 #endif
1621 #ifdef CONFIG_RETHOOK
1622 struct llist_head rethooks;
1623 #endif
1624
1625 #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
1626 /*
1627 * If L1D flush is supported on mm context switch,
1628 * then we use this callback head to queue kill work
1629 * to kill tasks that are not running on SMT-disabled
1630 * cores.
1631 */
1632 struct callback_head l1d_flush_kill;
1633 #endif
1634
1635 #ifdef CONFIG_RV
1636 /*
1637 * Per-task RV monitors. The count is currently fixed at RV_PER_TASK_MONITORS.
1638 * If we find justification for more monitors, we can think
1639 * about adding more or developing a dynamic method. So far,
1640 * none of these are justified.
1641 */
1642 union rv_task_monitor rv[RV_PER_TASK_MONITORS];
1643 #endif
1644
1645 #ifdef CONFIG_USER_EVENTS
1646 struct user_event_mm *user_event_mm;
1647 #endif
1648
1649 /* CPU-specific state of this task: */
1650 struct thread_struct thread;
1651
1652 /*
1653 * New fields for task_struct should be added above here, so that
1654 * they are included in the randomized portion of task_struct.
1655 */
1656 randomized_struct_fields_end
1657 } __attribute__ ((aligned (64)));
1658
1659 #ifdef CONFIG_SCHED_PROXY_EXEC
1660 DECLARE_STATIC_KEY_TRUE(__sched_proxy_exec);
1661 static inline bool sched_proxy_exec(void)
1662 {
1663 return static_branch_likely(&__sched_proxy_exec);
1664 }
1665 #else
1666 static inline bool sched_proxy_exec(void)
1667 {
1668 return false;
1669 }
1670 #endif
1671
1672 #define TASK_REPORT_IDLE (TASK_REPORT + 1)
1673 #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
1674
1675 static inline unsigned int __task_state_index(unsigned int tsk_state,
1676 unsigned int tsk_exit_state)
1677 {
1678 unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;
1679
1680 BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1681
1682 if ((tsk_state & TASK_IDLE) == TASK_IDLE)
1683 state = TASK_REPORT_IDLE;
1684
1685 /*
1686 * We're lying here, but rather than expose a completely new task state
1687 * to userspace, we can make this appear as if the task has gone through
1688 * a regular rt_mutex_lock() call.
1689 * Report frozen tasks as uninterruptible.
1690 */
1691 if ((tsk_state & TASK_RTLOCK_WAIT) || (tsk_state & TASK_FROZEN))
1692 state = TASK_UNINTERRUPTIBLE;
1693
1694 return fls(state);
1695 }
1696
1697 static inline unsigned int task_state_index(struct task_struct *tsk)
1698 {
1699 return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
1700 }
1701
1702 static inline char task_index_to_char(unsigned int state)
1703 {
1704 static const char state_char[] = "RSDTtXZPI";
1705
1706 BUILD_BUG_ON(TASK_REPORT_MAX * 2 != 1 << (sizeof(state_char) - 1));
1707
1708 return state_char[state];
1709 }
1710
1711 static inline char task_state_to_char(struct task_struct *tsk)
1712 {
1713 return task_index_to_char(task_state_index(tsk));
1714 }
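/*
 * Worked example: TASK_IDLE is (TASK_UNINTERRUPTIBLE | TASK_NOLOAD), so
 * __task_state_index() maps it to TASK_REPORT_IDLE (0x80) and fls() yields
 * 8, i.e. the 'I' in "RSDTtXZPI"; a plain TASK_INTERRUPTIBLE sleeper maps
 * to index 1 and is reported as 'S', matching ps(1) and /proc/<pid>/stat.
 */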
1715
1716 extern struct pid *cad_pid;
1717
1718 /*
1719 * Per process flags
1720 */
1721 #define PF_VCPU 0x00000001 /* I'm a virtual CPU */
1722 #define PF_IDLE 0x00000002 /* I am an IDLE thread */
1723 #define PF_EXITING 0x00000004 /* Getting shut down */
1724 #define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */
1725 #define PF_IO_WORKER 0x00000010 /* Task is an IO worker */
1726 #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
1727 #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
1728 #define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */
1729 #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
1730 #define PF_DUMPCORE 0x00000200 /* Dumped core */
1731 #define PF_SIGNALED 0x00000400 /* Killed by a signal */
1732 #define PF_MEMALLOC 0x00000800 /* Allocating memory to free memory. See memalloc_noreclaim_save() */
1733 #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
1734 #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
1735 #define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */
1736 #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
1737 #define PF_KCOMPACTD 0x00010000 /* I am kcompactd */
1738 #define PF_KSWAPD 0x00020000 /* I am kswapd */
1739 #define PF_MEMALLOC_NOFS 0x00040000 /* All allocations inherit GFP_NOFS. See memalloc_nofs_save() */
1740 #define PF_MEMALLOC_NOIO 0x00080000 /* All allocations inherit GFP_NOIO. See memalloc_noio_save() */
1741 #define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to,
1742 * I am cleaning dirty pages from some other bdi. */
1743 #define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1744 #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
1745 #define PF__HOLE__00800000 0x00800000
1746 #define PF__HOLE__01000000 0x01000000
1747 #define PF__HOLE__02000000 0x02000000
1748 #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
1749 #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1750 #define PF_MEMALLOC_PIN 0x10000000 /* Allocations constrained to zones which allow long term pinning.
1751 * See memalloc_pin_save() */
1752 #define PF_BLOCK_TS 0x20000000 /* plug has ts that needs updating */
1753 #define PF__HOLE__40000000 0x40000000
1754 #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
1755
1756 /*
1757 * Only the _current_ task can read/write to tsk->flags, but other
1758 * tasks can access tsk->flags in readonly mode for example
1759 * with tsk_used_math (like during threaded core dumping).
1760 * There is however an exception to this rule during ptrace
1761 * or during fork: the ptracer task is allowed to write to the
1762 * child->flags of its traced child (same goes for fork, the parent
1763 * can write to the child->flags), because we're guaranteed the
1764 * child is not running and in turn not changing child->flags
1765 * at the same time the parent does it.
1766 */
1767 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1768 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1769 #define clear_used_math() clear_stopped_child_used_math(current)
1770 #define set_used_math() set_stopped_child_used_math(current)
1771
1772 #define conditional_stopped_child_used_math(condition, child) \
1773 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1774
1775 #define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)
1776
1777 #define copy_to_stopped_child_used_math(child) \
1778 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1779
1780 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1781 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1782 #define used_math() tsk_used_math(current)
1783
1784 static __always_inline bool is_percpu_thread(void)
1785 {
1786 return (current->flags & PF_NO_SETAFFINITY) &&
1787 (current->nr_cpus_allowed == 1);
1788 }
1789
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
#define PFA_SPEC_SSB_DISABLE		3	/* Speculative Store Bypass disabled */
#define PFA_SPEC_SSB_FORCE_DISABLE	4	/* Speculative Store Bypass force disabled */
#define PFA_SPEC_IB_DISABLE		5	/* Indirect branch speculation restricted */
#define PFA_SPEC_IB_FORCE_DISABLE	6	/* Indirect branch speculation permanently restricted */
#define PFA_SPEC_SSB_NOEXEC		7	/* Speculative Store Bypass clear on execve() */

#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

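/*
 * For illustration (not part of the upstream header): the invocation
 * TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) below expands to a test
 * helper of the following shape:
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * Note that flags which must never be cleared again (NO_NEW_PRIVS and
 * the FORCE_DISABLE variants) deliberately get no TASK_PFA_CLEAR()
 * instantiation.
 */
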
TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)

TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)

TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)

TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)

TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)

static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
	current->flags &= ~flags;
	current->flags |= orig_flags & flags;
}
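
/*
 * Illustrative save/restore pairing (a sketch; the real helpers such as
 * memalloc_noreclaim_save()/restore() live in <linux/sched/mm.h>):
 * callers snapshot the bits they are about to set and later hand the
 * snapshot back to current_restore_flags() so that nesting works:
 *
 *	unsigned int noreclaim_flag = current->flags & PF_MEMALLOC;
 *
 *	current->flags |= PF_MEMALLOC;
 *	... allocate while recursion into reclaim is forbidden ...
 *	current_restore_flags(noreclaim_flag, PF_MEMALLOC);
 */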

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p);
extern int dl_bw_alloc(int cpu, u64 dl_bw);
extern void dl_bw_free(int cpu, u64 dl_bw);

/* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);

/**
 * set_cpus_allowed_ptr - set CPU affinity mask of a task
 * @p: the task
 * @new_mask: CPU affinity mask
 *
 * Return: zero if successful, or a negative error code
 */
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
extern void release_user_cpus_ptr(struct task_struct *p);
extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
extern void sched_set_fifo_low(struct task_struct *p);
extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static __always_inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
	struct task_struct task;
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef CONFIG_THREAD_INFO_IN_TASK
extern struct thread_info init_thread_info;
#endif

extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];

#ifdef CONFIG_THREAD_INFO_IN_TASK
# define task_thread_info(task)	(&(task)->thread_info)
#else
# define task_thread_info(task)	((struct thread_info *)(task)->stack)
#endif

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);

/*
 * find a task by its virtual pid and get the task struct
 */
extern struct task_struct *find_get_task_by_vpid(pid_t nr);
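
/*
 * Illustrative lookup pattern (a sketch, not mandated by this header):
 * the non-_get_ lookups return an unreferenced pointer that is only
 * stable under RCU, so a caller that wants to use the task afterwards
 * typically does:
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *
 * find_get_task_by_vpid() bundles exactly this reference acquisition.
 */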

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

extern void kick_process(struct task_struct *tsk);

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
#define set_task_comm(tsk, from) ({			\
	BUILD_BUG_ON(sizeof(from) != TASK_COMM_LEN);	\
	__set_task_comm(tsk, from, false);		\
})

/*
 * - Why not use task_lock()?
 *   User space can change a task's name at any time anyway, so locking
 *   for readers doesn't make sense. For writers, locking is probably
 *   necessary, as a race condition could leave the comm as a long-lived
 *   mix of two names.
 *   The strscpy_pad() in __set_task_comm() ensures that the task comm is
 *   always NUL-terminated and zero-padded, so the race between reader
 *   and writer is not an issue.
 *
 * - The BUILD_BUG_ON() helps prevent the buffer from being silently
 *   truncated. Since the callers don't perform any return value checks,
 *   this safeguard is necessary.
 */
#define get_task_comm(buf, tsk) ({			\
	BUILD_BUG_ON(sizeof(buf) < TASK_COMM_LEN);	\
	strscpy_pad(buf, (tsk)->comm);			\
	buf;						\
})
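
/*
 * Usage sketch: because of the sizeof()-based BUILD_BUG_ON() checks
 * above, both helpers must be handed a real array, never a pointer:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, current);	// OK: sizeof(comm) is known
 *	// get_task_comm(ptr, current) would fail to build
 */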

static __always_inline void scheduler_ipi(void)
{
	/*
	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
	 * TIF_NEED_RESCHED remotely (for the first time) will also send
	 * this IPI.
	 */
	preempt_fold_need_resched();
}

extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);

/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
					  bool value)
{
	update_ti_thread_flag(task_thread_info(tsk), flag, value);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	atomic_long_andnot(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY,
			   (atomic_long_t *)&task_thread_info(tsk)->flags);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}
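
/*
 * Illustrative remote-reschedule pairing (a sketch; the scheduler's own
 * resched_curr() is the canonical implementation): after setting the
 * flag on a remote task, its CPU must be poked so the flag is noticed
 * on the next interrupt return:
 *
 *	set_tsk_need_resched(p);
 *	kick_process(p);
 */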

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was in fact done.
 * cond_resched_lock() will drop the spinlock before scheduling.
 */
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
extern int __cond_resched(void);

#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)

DECLARE_STATIC_CALL(cond_resched, __cond_resched);

static __always_inline int _cond_resched(void)
{
	return static_call_mod(cond_resched)();
}

#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)

extern int dynamic_cond_resched(void);

static __always_inline int _cond_resched(void)
{
	return dynamic_cond_resched();
}

#else /* !CONFIG_PREEMPTION */

static inline int _cond_resched(void)
{
	return __cond_resched();
}

#endif /* PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */

#else /* CONFIG_PREEMPTION && !CONFIG_PREEMPT_DYNAMIC */

static inline int _cond_resched(void)
{
	return 0;
}

#endif /* !CONFIG_PREEMPTION || CONFIG_PREEMPT_DYNAMIC */

#define cond_resched() ({			\
	__might_resched(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})
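
/*
 * Typical use (an illustrative sketch; process_item() is hypothetical):
 * long-running loops in preemptible context should offer to reschedule
 * at each safe point so !CONFIG_PREEMPTION kernels stay responsive:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(&items[i]);
 *		cond_resched();
 *	}
 */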

extern int __cond_resched_lock(spinlock_t *lock);
extern int __cond_resched_rwlock_read(rwlock_t *lock);
extern int __cond_resched_rwlock_write(rwlock_t *lock);

#define MIGHT_RESCHED_RCU_SHIFT		8
#define MIGHT_RESCHED_PREEMPT_MASK	((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)

#ifndef CONFIG_PREEMPT_RT
/*
 * Non-RT kernels have an elevated preempt count due to the held lock,
 * but are not allowed to be inside an RCU read-side critical section
 */
# define PREEMPT_LOCK_RESCHED_OFFSETS	PREEMPT_LOCK_OFFSET
#else
/*
 * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
 * cond_resched*lock() has to take that into account because it checks for
 * preempt_count() and rcu_preempt_depth().
 */
# define PREEMPT_LOCK_RESCHED_OFFSETS	\
	(PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
#endif

#define cond_resched_lock(lock) ({						\
	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
	__cond_resched_lock(lock);						\
})

#define cond_resched_rwlock_read(lock) ({					\
	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
	__cond_resched_rwlock_read(lock);					\
})

#define cond_resched_rwlock_write(lock) ({					\
	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
	__cond_resched_rwlock_write(lock);					\
})
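
/*
 * Usage sketch (illustrative only): a long scan under a spinlock can
 * breathe by letting cond_resched_lock() drop and re-take the lock.
 * A nonzero return means the lock was released in between, so any
 * state protected by it must be revalidated:
 *
 *	spin_lock(&lock);
 *	while (more_work()) {		// hypothetical predicate
 *		do_one_step();		// hypothetical helper
 *		if (cond_resched_lock(&lock))
 *			continue;	// lock was dropped: re-check state
 *	}
 *	spin_unlock(&lock);
 */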

#ifndef CONFIG_PREEMPT_RT
static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
{
	struct mutex *m = p->blocked_on;

	if (m)
		lockdep_assert_held_once(&m->wait_lock);
	return m;
}

static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
	WARN_ON_ONCE(!m);
	/* The task should only be setting itself as blocked */
	WARN_ON_ONCE(p != current);
	/* Currently we serialize blocked_on under the mutex::wait_lock */
	lockdep_assert_held_once(&m->wait_lock);
	/*
	 * Check to ensure we don't overwrite an existing mutex value
	 * with a different mutex. Note, setting it to the same
	 * lock repeatedly is ok.
	 */
	WARN_ON_ONCE(p->blocked_on && p->blocked_on != m);
	p->blocked_on = m;
}

static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
	guard(raw_spinlock_irqsave)(&m->wait_lock);
	__set_task_blocked_on(p, m);
}

static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m)
{
	WARN_ON_ONCE(!m);
	/* Currently we serialize blocked_on under the mutex::wait_lock */
	lockdep_assert_held_once(&m->wait_lock);
	/*
	 * There may be cases where we re-clear already cleared
	 * blocked_on relationships, but make sure we are not
	 * clearing the relationship with a different lock.
	 */
	WARN_ON_ONCE(m && p->blocked_on && p->blocked_on != m);
	p->blocked_on = NULL;
}

static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
{
	guard(raw_spinlock_irqsave)(&m->wait_lock);
	__clear_task_blocked_on(p, m);
}
#else
static inline void __clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m)
{
}

static inline void clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m)
{
}
#endif /* !CONFIG_PREEMPT_RT */
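
/*
 * Illustrative lifecycle (a sketch; the authoritative callers are the
 * mutex slowpaths): a waiter records which mutex it sleeps on before
 * blocking and clears the record once the wait is over:
 *
 *	set_task_blocked_on(current, m);
 *	... block until m is handed over ...
 *	clear_task_blocked_on(current, m);
 */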

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return READ_ONCE(task_thread_info(p)->cpu);
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

static inline bool task_is_runnable(struct task_struct *p)
{
	return p->on_rq && !p->se.sched_delayed;
}

extern bool sched_task_on_rq(struct task_struct *p);
extern unsigned long get_wchan(struct task_struct *p);
extern struct task_struct *cpu_curr_snapshot(int cpu);

/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * Due to lock holder preemption, skip spinning both when the
	 * owner task is not running on a CPU and when the CPU it runs
	 * on is preempted (the vCPU case).
	 */
	return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
}
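
/*
 * Illustrative spin loop (a sketch; real users are the mutex/rwsem
 * optimistic spinners): keep spinning only while the owner is making
 * progress, otherwise give up and block:
 *
 *	while (owner_is_unchanged(lock, owner)) {	// hypothetical check
 *		if (!owner_on_cpu(owner))
 *			break;		// owner off-CPU: sleep instead
 *		cpu_relax();
 *	}
 */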

/* Returns effective CPU energy utilization, as seen by the scheduler */
unsigned long sched_cpu_util(int cpu);

#ifdef CONFIG_SCHED_CORE
extern void sched_core_free(struct task_struct *tsk);
extern void sched_core_fork(struct task_struct *p);
extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
				unsigned long uaddr);
extern int sched_core_idle_cpu(int cpu);
#else
static inline void sched_core_free(struct task_struct *tsk) { }
static inline void sched_core_fork(struct task_struct *p) { }
static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
#endif

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

#ifdef CONFIG_MEM_ALLOC_PROFILING
static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
{
	swap(current->alloc_tag, tag);
	return tag;
}

static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
{
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
	WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n");
#endif
	current->alloc_tag = old;
}
#else
#define alloc_tag_save(_tag)		NULL
#define alloc_tag_restore(_tag, _old)	do {} while (0)
#endif
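
/*
 * Typical pairing (an illustrative sketch; my_tag is hypothetical):
 * allocations made between save and restore are accounted to the
 * saved tag:
 *
 *	struct alloc_tag *old = alloc_tag_save(my_tag);
 *
 *	... allocations here are charged to my_tag ...
 *	alloc_tag_restore(my_tag, old);
 */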

#endif /* _LINUX_SCHED_H */