1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21da177e4SLinus Torvalds #ifndef _LINUX_SCHED_H
31da177e4SLinus Torvalds #define _LINUX_SCHED_H
41da177e4SLinus Torvalds
55eca1c10SIngo Molnar /*
65eca1c10SIngo Molnar * Define 'struct task_struct' and provide the main scheduler
75eca1c10SIngo Molnar * APIs (schedule(), wakeup variants, etc.)
85eca1c10SIngo Molnar */
95eca1c10SIngo Molnar
10607ca46eSDavid Howells #include <uapi/linux/sched.h>
11b7b3c76aSDavid Woodhouse
1270b8157eSIngo Molnar #include <asm/current.h>
131e2f2d31SKent Overstreet #include <asm/processor.h>
141e2f2d31SKent Overstreet #include <linux/thread_info.h>
151e2f2d31SKent Overstreet #include <linux/preempt.h>
16361c1f04SYury Norov #include <linux/cpumask_types.h>
1770b8157eSIngo Molnar
18e034d49eSKent Overstreet #include <linux/cache.h>
199983deb2SKent Overstreet #include <linux/irqflags_types.h>
201e2f2d31SKent Overstreet #include <linux/smp_types.h>
216d5e9d63SKent Overstreet #include <linux/pid_types.h>
22e034d49eSKent Overstreet #include <linux/sem_types.h>
235eca1c10SIngo Molnar #include <linux/shm.h>
24f80be457SAlexander Potapenko #include <linux/kmsan_types.h>
25d84f3179SKent Overstreet #include <linux/mutex_types.h>
268b7787a5SKent Overstreet #include <linux/plist_types.h>
2750d91c76SKent Overstreet #include <linux/hrtimer_types.h>
28e034d49eSKent Overstreet #include <linux/timer_types.h>
29a6e1420cSKent Overstreet #include <linux/seccomp_types.h>
30bea32141SKent Overstreet #include <linux/nodemask_types.h>
31f9d6966bSKent Overstreet #include <linux/refcount_types.h>
325eca1c10SIngo Molnar #include <linux/resource.h>
335eca1c10SIngo Molnar #include <linux/latencytop.h>
345eca1c10SIngo Molnar #include <linux/sched/prio.h>
359eacb5c7SThomas Gleixner #include <linux/sched/types.h>
365eca1c10SIngo Molnar #include <linux/signal_types.h>
3755b899aaSKent Overstreet #include <linux/syscall_user_dispatch_types.h>
385eca1c10SIngo Molnar #include <linux/mm_types_task.h>
39ecefbc09SSebastian Andrzej Siewior #include <linux/netdevice_xmit.h>
405eca1c10SIngo Molnar #include <linux/task_io_accounting.h>
4153d31ba8SKent Overstreet #include <linux/posix-timers_types.h>
42cba6167fSKent Overstreet #include <linux/restart_block.h>
43932562a6SKent Overstreet #include <uapi/linux/rseq.h>
44f038cc13SKent Overstreet #include <linux/seqlock_types.h>
45dfd402a4SMarco Elver #include <linux/kcsan.h>
46102227b9SDaniel Bristot de Oliveira #include <linux/rv.h>
47e3ff7c60SJosh Poimboeuf #include <linux/livepatch_sched.h>
48af6da56aSKent Overstreet #include <linux/uidgid_types.h>
495fbda3ecSThomas Gleixner #include <asm/kmap_size.h>
505eca1c10SIngo Molnar
515eca1c10SIngo Molnar /* task_struct member predeclarations (sorted alphabetically): */
52c7af7877SIngo Molnar struct audit_context;
53c7af7877SIngo Molnar struct bio_list;
54c7af7877SIngo Molnar struct blk_plug;
55a10787e6SSong Liu struct bpf_local_storage;
56c7603cfaSAndrii Nakryiko struct bpf_run_ctx;
57401cb7daSSebastian Andrzej Siewior struct bpf_net_context;
583c93a0c0SQais Yousef struct capture_control;
59c7af7877SIngo Molnar struct cfs_rq;
60c7af7877SIngo Molnar struct fs_struct;
61c7af7877SIngo Molnar struct futex_pi_state;
62c7af7877SIngo Molnar struct io_context;
631875dc5bSPeter Oskolkov struct io_uring_task;
64c7af7877SIngo Molnar struct mempolicy;
65c7af7877SIngo Molnar struct nameidata;
66c7af7877SIngo Molnar struct nsproxy;
67c7af7877SIngo Molnar struct perf_event_context;
68c7af7877SIngo Molnar struct pid_namespace;
69c7af7877SIngo Molnar struct pipe_inode_info;
70c7af7877SIngo Molnar struct rcu_node;
71c7af7877SIngo Molnar struct reclaim_state;
72c7af7877SIngo Molnar struct robust_list_head;
733c93a0c0SQais Yousef struct root_domain;
743c93a0c0SQais Yousef struct rq;
75e2d1e2aeSIngo Molnar struct sched_attr;
7663ba8422SPeter Zijlstra struct sched_dl_entity;
7743ae34cbSIngo Molnar struct seq_file;
78c7af7877SIngo Molnar struct sighand_struct;
79c7af7877SIngo Molnar struct signal_struct;
80c7af7877SIngo Molnar struct task_delay_info;
814cf86d77SIngo Molnar struct task_group;
8263ba8422SPeter Zijlstra struct task_struct;
83fd593511SBeau Belgrave struct user_event_mm;
841da177e4SLinus Torvalds
85f0e1a064STejun Heo #include <linux/sched/ext.h>
86f0e1a064STejun Heo
874a8342d2SLinus Torvalds /*
884a8342d2SLinus Torvalds * Task state bitmask. NOTE! These bits are also
894a8342d2SLinus Torvalds * encoded in fs/proc/array.c: get_task_state().
904a8342d2SLinus Torvalds *
9148b55837SChin Yik Ming * We have two separate sets of flags: task->__state
924a8342d2SLinus Torvalds * is about runnability, while task->exit_state is
934a8342d2SLinus Torvalds * about the task exiting. Confusing, but this way
944a8342d2SLinus Torvalds * modifying one set can't modify the other one by
954a8342d2SLinus Torvalds * mistake.
964a8342d2SLinus Torvalds */
975eca1c10SIngo Molnar
9848b55837SChin Yik Ming /* Used in tsk->__state: */
999963e444SPeter Zijlstra #define TASK_RUNNING 0x00000000
1009963e444SPeter Zijlstra #define TASK_INTERRUPTIBLE 0x00000001
1019963e444SPeter Zijlstra #define TASK_UNINTERRUPTIBLE 0x00000002
1029963e444SPeter Zijlstra #define __TASK_STOPPED 0x00000004
1039963e444SPeter Zijlstra #define __TASK_TRACED 0x00000008
1045eca1c10SIngo Molnar /* Used in tsk->exit_state: */
1059963e444SPeter Zijlstra #define EXIT_DEAD 0x00000010
1069963e444SPeter Zijlstra #define EXIT_ZOMBIE 0x00000020
107abd50b39SOleg Nesterov #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
10848b55837SChin Yik Ming /* Used in tsk->__state again: */
1099963e444SPeter Zijlstra #define TASK_PARKED 0x00000040
1109963e444SPeter Zijlstra #define TASK_DEAD 0x00000080
1119963e444SPeter Zijlstra #define TASK_WAKEKILL 0x00000100
1129963e444SPeter Zijlstra #define TASK_WAKING 0x00000200
1139963e444SPeter Zijlstra #define TASK_NOLOAD 0x00000400
1149963e444SPeter Zijlstra #define TASK_NEW 0x00000800
1159963e444SPeter Zijlstra #define TASK_RTLOCK_WAIT 0x00001000
116f5d39b02SPeter Zijlstra #define TASK_FREEZABLE 0x00002000
117f5d39b02SPeter Zijlstra #define __TASK_FREEZABLE_UNSAFE (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
118f5d39b02SPeter Zijlstra #define TASK_FROZEN 0x00008000
119f5d39b02SPeter Zijlstra #define TASK_STATE_MAX 0x00010000
120f021a3c2SMatthew Wilcox
121f9fc8cadSPeter Zijlstra #define TASK_ANY (TASK_STATE_MAX-1)
122f9fc8cadSPeter Zijlstra
123f5d39b02SPeter Zijlstra /*
124f5d39b02SPeter Zijlstra * DO NOT ADD ANY NEW USERS !
125f5d39b02SPeter Zijlstra */
126f5d39b02SPeter Zijlstra #define TASK_FREEZABLE_UNSAFE (TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)
127f021a3c2SMatthew Wilcox
1285eca1c10SIngo Molnar /* Convenience macros for the sake of set_current_state: */
129f021a3c2SMatthew Wilcox #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
130f021a3c2SMatthew Wilcox #define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
1312500ad1cSEric W. Biederman #define TASK_TRACED __TASK_TRACED
1321da177e4SLinus Torvalds
13380ed87c8SPeter Zijlstra #define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
13480ed87c8SPeter Zijlstra
1355eca1c10SIngo Molnar /* Convenience macros for the sake of wake_up(): */
13692a1f4bcSMatthew Wilcox #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
13792a1f4bcSMatthew Wilcox
1385eca1c10SIngo Molnar /* get_task_state(): */
13992a1f4bcSMatthew Wilcox #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
140f021a3c2SMatthew Wilcox TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
1418ef9925bSPeter Zijlstra __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
1428ef9925bSPeter Zijlstra TASK_PARKED)
14392a1f4bcSMatthew Wilcox
1442f064a59SPeter Zijlstra #define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
1455eca1c10SIngo Molnar
14631cae1eaSPeter Zijlstra #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
14731cae1eaSPeter Zijlstra #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
14831cae1eaSPeter Zijlstra #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
1495eca1c10SIngo Molnar
150b5bf9a90SPeter Zijlstra /*
151b5bf9a90SPeter Zijlstra * Special states are those that do not use the normal wait-loop pattern. See
152b5bf9a90SPeter Zijlstra * the comment with set_special_state().
153b5bf9a90SPeter Zijlstra */
154b5bf9a90SPeter Zijlstra #define is_special_task_state(state) \
155a1c44661SPeter Zijlstra ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | \
156a1c44661SPeter Zijlstra TASK_DEAD | TASK_FROZEN))
157b5bf9a90SPeter Zijlstra
15885019c16SThomas Gleixner #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
15985019c16SThomas Gleixner # define debug_normal_state_change(state_value) \
1608eb23b9fSPeter Zijlstra do { \
161b5bf9a90SPeter Zijlstra WARN_ON_ONCE(is_special_task_state(state_value)); \
1628eb23b9fSPeter Zijlstra current->task_state_change = _THIS_IP_; \
1638eb23b9fSPeter Zijlstra } while (0)
164b5bf9a90SPeter Zijlstra
16585019c16SThomas Gleixner # define debug_special_state_change(state_value) \
1668eb23b9fSPeter Zijlstra do { \
167b5bf9a90SPeter Zijlstra WARN_ON_ONCE(!is_special_task_state(state_value)); \
168b5bf9a90SPeter Zijlstra current->task_state_change = _THIS_IP_; \
169b5bf9a90SPeter Zijlstra } while (0)
17085019c16SThomas Gleixner
1715f220be2SThomas Gleixner # define debug_rtlock_wait_set_state() \
1725f220be2SThomas Gleixner do { \
1735f220be2SThomas Gleixner current->saved_state_change = current->task_state_change;\
1745f220be2SThomas Gleixner current->task_state_change = _THIS_IP_; \
1755f220be2SThomas Gleixner } while (0)
1765f220be2SThomas Gleixner
1775f220be2SThomas Gleixner # define debug_rtlock_wait_restore_state() \
1785f220be2SThomas Gleixner do { \
1795f220be2SThomas Gleixner current->task_state_change = current->saved_state_change;\
1805f220be2SThomas Gleixner } while (0)
1815f220be2SThomas Gleixner
1828eb23b9fSPeter Zijlstra #else
18385019c16SThomas Gleixner # define debug_normal_state_change(cond) do { } while (0)
18485019c16SThomas Gleixner # define debug_special_state_change(cond) do { } while (0)
1855f220be2SThomas Gleixner # define debug_rtlock_wait_set_state() do { } while (0)
1865f220be2SThomas Gleixner # define debug_rtlock_wait_restore_state() do { } while (0)
18785019c16SThomas Gleixner #endif
18885019c16SThomas Gleixner
189498d0c57SAndrew Morton /*
19048b55837SChin Yik Ming * set_current_state() includes a barrier so that the write of current->__state
191498d0c57SAndrew Morton * is correctly serialised wrt the caller's subsequent test of whether to
192498d0c57SAndrew Morton * actually sleep:
193498d0c57SAndrew Morton *
194a2250238SPeter Zijlstra * for (;;) {
195498d0c57SAndrew Morton * set_current_state(TASK_UNINTERRUPTIBLE);
19658877d34SPeter Zijlstra * if (CONDITION)
197a2250238SPeter Zijlstra * break;
198498d0c57SAndrew Morton *
199a2250238SPeter Zijlstra * schedule();
200a2250238SPeter Zijlstra * }
201a2250238SPeter Zijlstra * __set_current_state(TASK_RUNNING);
202a2250238SPeter Zijlstra *
203a2250238SPeter Zijlstra * If the caller does not need such serialisation (because, for instance, the
20458877d34SPeter Zijlstra * CONDITION test and condition change and wakeup are under the same lock) then
205a2250238SPeter Zijlstra * use __set_current_state().
206a2250238SPeter Zijlstra *
207a2250238SPeter Zijlstra * The above is typically ordered against the wakeup, which does:
208a2250238SPeter Zijlstra *
20958877d34SPeter Zijlstra * CONDITION = 1;
210a2250238SPeter Zijlstra * wake_up_state(p, TASK_UNINTERRUPTIBLE);
211a2250238SPeter Zijlstra *
21258877d34SPeter Zijlstra * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
21348b55837SChin Yik Ming * accessing p->__state.
214a2250238SPeter Zijlstra *
21548b55837SChin Yik Ming * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
216a2250238SPeter Zijlstra * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
217a2250238SPeter Zijlstra * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
218a2250238SPeter Zijlstra *
219b5bf9a90SPeter Zijlstra * However, with slightly different timing the wakeup TASK_RUNNING store can
220dfcb245eSIngo Molnar * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
221b5bf9a90SPeter Zijlstra * a problem either because that will result in one extra go around the loop
222b5bf9a90SPeter Zijlstra * and our CONDITION test will save the day.
223a2250238SPeter Zijlstra *
224a2250238SPeter Zijlstra * Also see the comments of try_to_wake_up().
225498d0c57SAndrew Morton */
226b5bf9a90SPeter Zijlstra #define __set_current_state(state_value) \
22785019c16SThomas Gleixner do { \
22885019c16SThomas Gleixner debug_normal_state_change((state_value)); \
22985019c16SThomas Gleixner WRITE_ONCE(current->__state, (state_value)); \
23085019c16SThomas Gleixner } while (0)
231b5bf9a90SPeter Zijlstra
232b5bf9a90SPeter Zijlstra #define set_current_state(state_value) \
23385019c16SThomas Gleixner do { \
23485019c16SThomas Gleixner debug_normal_state_change((state_value)); \
23585019c16SThomas Gleixner smp_store_mb(current->__state, (state_value)); \
23685019c16SThomas Gleixner } while (0)
237b5bf9a90SPeter Zijlstra
238b5bf9a90SPeter Zijlstra /*
239b5bf9a90SPeter Zijlstra * set_special_state() should be used for those states when the blocking task
240b5bf9a90SPeter Zijlstra * cannot use the regular condition-based wait-loop. In that case we must
24185019c16SThomas Gleixner * serialize against wakeups such that any possible in-flight TASK_RUNNING
24285019c16SThomas Gleixner * stores will not collide with our state change.
243b5bf9a90SPeter Zijlstra */
244b5bf9a90SPeter Zijlstra #define set_special_state(state_value) \
245b5bf9a90SPeter Zijlstra do { \
246b5bf9a90SPeter Zijlstra unsigned long flags; /* may shadow */ \
24785019c16SThomas Gleixner \
248b5bf9a90SPeter Zijlstra raw_spin_lock_irqsave(&current->pi_lock, flags); \
24985019c16SThomas Gleixner debug_special_state_change((state_value)); \
2502f064a59SPeter Zijlstra WRITE_ONCE(current->__state, (state_value)); \
251b5bf9a90SPeter Zijlstra raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
252b5bf9a90SPeter Zijlstra } while (0)
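/*
 * Minimal usage sketch (illustration only, loosely modelled on a job-control
 * stop path and not copied from any single caller): the special state is
 * written under a lock that the waker also takes, so no wait-loop is needed:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	...
 *	set_special_state(TASK_STOPPED);
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 *	schedule();
 */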
253b5bf9a90SPeter Zijlstra
2545f220be2SThomas Gleixner /*
2555f220be2SThomas Gleixner * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
2565f220be2SThomas Gleixner *
2575f220be2SThomas Gleixner * RT's spin/rwlock substitutions are state preserving. The state of the
2585f220be2SThomas Gleixner * task when blocking on the lock is saved in task_struct::saved_state and
2595f220be2SThomas Gleixner * restored after the lock has been acquired. These operations are
2605f220be2SThomas Gleixner * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
2615f220be2SThomas Gleixner * lock related wakeups while the task is blocked on the lock are
2625f220be2SThomas Gleixner * redirected to operate on task_struct::saved_state to ensure that these
2635f220be2SThomas Gleixner * are not dropped. On restore task_struct::saved_state is set to
2645f220be2SThomas Gleixner * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
2655f220be2SThomas Gleixner *
2665f220be2SThomas Gleixner * The lock operation looks like this:
2675f220be2SThomas Gleixner *
2685f220be2SThomas Gleixner * current_save_and_set_rtlock_wait_state();
2695f220be2SThomas Gleixner * for (;;) {
2705f220be2SThomas Gleixner * if (try_lock())
2715f220be2SThomas Gleixner * break;
2725f220be2SThomas Gleixner * raw_spin_unlock_irq(&lock->wait_lock);
2735f220be2SThomas Gleixner * schedule_rtlock();
2745f220be2SThomas Gleixner * raw_spin_lock_irq(&lock->wait_lock);
2755f220be2SThomas Gleixner * set_current_state(TASK_RTLOCK_WAIT);
2765f220be2SThomas Gleixner * }
2775f220be2SThomas Gleixner * current_restore_rtlock_saved_state();
2785f220be2SThomas Gleixner */
2795f220be2SThomas Gleixner #define current_save_and_set_rtlock_wait_state() \
2805f220be2SThomas Gleixner do { \
2815f220be2SThomas Gleixner lockdep_assert_irqs_disabled(); \
2825f220be2SThomas Gleixner raw_spin_lock(&current->pi_lock); \
2835f220be2SThomas Gleixner current->saved_state = current->__state; \
2845f220be2SThomas Gleixner debug_rtlock_wait_set_state(); \
2855f220be2SThomas Gleixner WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
2865f220be2SThomas Gleixner raw_spin_unlock(&current->pi_lock); \
2875f220be2SThomas Gleixner } while (0);
2885f220be2SThomas Gleixner
2895f220be2SThomas Gleixner #define current_restore_rtlock_saved_state() \
2905f220be2SThomas Gleixner do { \
2915f220be2SThomas Gleixner lockdep_assert_irqs_disabled(); \
2925f220be2SThomas Gleixner raw_spin_lock(&current->pi_lock); \
2935f220be2SThomas Gleixner debug_rtlock_wait_restore_state(); \
2945f220be2SThomas Gleixner WRITE_ONCE(current->__state, current->saved_state); \
2955f220be2SThomas Gleixner current->saved_state = TASK_RUNNING; \
2965f220be2SThomas Gleixner raw_spin_unlock(&current->pi_lock); \
2975f220be2SThomas Gleixner } while (0);
2988eb23b9fSPeter Zijlstra
2992f064a59SPeter Zijlstra #define get_current_state() READ_ONCE(current->__state)
300d6c23bb3SPeter Zijlstra
3013087c61eSYafang Shao /*
3023087c61eSYafang Shao * Define the task command name length as enum, then it can be visible to
3033087c61eSYafang Shao * BPF programs.
3043087c61eSYafang Shao */
3053087c61eSYafang Shao enum {
3063087c61eSYafang Shao TASK_COMM_LEN = 16,
3073087c61eSYafang Shao };
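/*
 * Example (sketch): TASK_COMM_LEN bounds the buffer handed to
 * get_task_comm(), e.g.:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, current);
 */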
3081da177e4SLinus Torvalds
30986dd6c04SIngo Molnar extern void sched_tick(void);
3101da177e4SLinus Torvalds
3111da177e4SLinus Torvalds #define MAX_SCHEDULE_TIMEOUT LONG_MAX
3125eca1c10SIngo Molnar
3135eca1c10SIngo Molnar extern long schedule_timeout(long timeout);
3145eca1c10SIngo Molnar extern long schedule_timeout_interruptible(long timeout);
3155eca1c10SIngo Molnar extern long schedule_timeout_killable(long timeout);
3165eca1c10SIngo Molnar extern long schedule_timeout_uninterruptible(long timeout);
3175eca1c10SIngo Molnar extern long schedule_timeout_idle(long timeout);
3181da177e4SLinus Torvalds asmlinkage void schedule(void);
319c5491ea7SThomas Gleixner extern void schedule_preempt_disabled(void);
32019c95f26SJulien Thierry asmlinkage void preempt_schedule_irq(void);
3216991436cSThomas Gleixner #ifdef CONFIG_PREEMPT_RT
3226991436cSThomas Gleixner extern void schedule_rtlock(void);
3236991436cSThomas Gleixner #endif
3241da177e4SLinus Torvalds
32510ab5643STejun Heo extern int __must_check io_schedule_prepare(void);
32610ab5643STejun Heo extern void io_schedule_finish(int token);
3279cff8adeSNeilBrown extern long io_schedule_timeout(long timeout);
32810ab5643STejun Heo extern void io_schedule(void);
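/*
 * Usage sketch for the schedule_timeout() family above (illustrative; the
 * 100ms value is an arbitrary example): the caller sets the task state first,
 * then schedule_timeout() sleeps for at most the given number of jiffies:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *
 * A return value of 0 means the full timeout elapsed; a non-zero value is the
 * number of jiffies left because the task was woken early (e.g. by a signal
 * or wake_up_process()).
 */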
3299cff8adeSNeilBrown
330f06febc9SFrank Mayhar /**
3310ba42a59SMasanari Iida * struct prev_cputime - snapshot of system and user cputime
332d37f761dSFrederic Weisbecker * @utime: time spent in user mode
333d37f761dSFrederic Weisbecker * @stime: time spent in system mode
3349d7fb042SPeter Zijlstra * @lock: protects the above two fields
335d37f761dSFrederic Weisbecker *
3369d7fb042SPeter Zijlstra * Stores previous user/system time values such that we can guarantee
3379d7fb042SPeter Zijlstra * monotonicity.
338d37f761dSFrederic Weisbecker */
3399d7fb042SPeter Zijlstra struct prev_cputime {
3409d7fb042SPeter Zijlstra #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
3415613fda9SFrederic Weisbecker u64 utime;
3425613fda9SFrederic Weisbecker u64 stime;
3439d7fb042SPeter Zijlstra raw_spinlock_t lock;
3449d7fb042SPeter Zijlstra #endif
345d37f761dSFrederic Weisbecker };
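/*
 * Illustration of the guarantee only (not the actual adjustment code): if a
 * freshly computed sample would go backwards, the previously reported value
 * wins, so readers never see utime/stime decrease:
 *
 *	stime = max(stime, prev->stime);
 *	utime = max(utime, prev->utime);
 *	prev->stime = stime;
 *	prev->utime = utime;
 */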
346d37f761dSFrederic Weisbecker
347bac5b6b6SFrederic Weisbecker enum vtime_state {
348bac5b6b6SFrederic Weisbecker /* Task is sleeping or running in a CPU with VTIME inactive: */
349bac5b6b6SFrederic Weisbecker VTIME_INACTIVE = 0,
35014faf6fcSFrederic Weisbecker /* Task is idle */
35114faf6fcSFrederic Weisbecker VTIME_IDLE,
352bac5b6b6SFrederic Weisbecker /* Task runs in kernelspace in a CPU with VTIME active: */
353bac5b6b6SFrederic Weisbecker VTIME_SYS,
35414faf6fcSFrederic Weisbecker /* Task runs in userspace in a CPU with VTIME active: */
35514faf6fcSFrederic Weisbecker VTIME_USER,
356e6d5bf3eSFrederic Weisbecker /* Task runs as a guest in a CPU with VTIME active: */
357e6d5bf3eSFrederic Weisbecker VTIME_GUEST,
358bac5b6b6SFrederic Weisbecker };
359bac5b6b6SFrederic Weisbecker
360bac5b6b6SFrederic Weisbecker struct vtime {
361bac5b6b6SFrederic Weisbecker seqcount_t seqcount;
362bac5b6b6SFrederic Weisbecker unsigned long long starttime;
363bac5b6b6SFrederic Weisbecker enum vtime_state state;
364802f4a82SFrederic Weisbecker unsigned int cpu;
3652a42eb95SWanpeng Li u64 utime;
3662a42eb95SWanpeng Li u64 stime;
3672a42eb95SWanpeng Li u64 gtime;
368bac5b6b6SFrederic Weisbecker };
369bac5b6b6SFrederic Weisbecker
37069842cbaSPatrick Bellasi /*
37169842cbaSPatrick Bellasi * Utilization clamp constraints.
37269842cbaSPatrick Bellasi * @UCLAMP_MIN: Minimum utilization
37369842cbaSPatrick Bellasi * @UCLAMP_MAX: Maximum utilization
37469842cbaSPatrick Bellasi * @UCLAMP_CNT: Utilization clamp constraints count
37569842cbaSPatrick Bellasi */
37669842cbaSPatrick Bellasi enum uclamp_id {
37769842cbaSPatrick Bellasi UCLAMP_MIN = 0,
37869842cbaSPatrick Bellasi UCLAMP_MAX,
37969842cbaSPatrick Bellasi UCLAMP_CNT
38069842cbaSPatrick Bellasi };
38169842cbaSPatrick Bellasi
382f9a25f77SMathieu Poirier #ifdef CONFIG_SMP
383f9a25f77SMathieu Poirier extern struct root_domain def_root_domain;
384f9a25f77SMathieu Poirier extern struct mutex sched_domains_mutex;
385f9a25f77SMathieu Poirier #endif
386f9a25f77SMathieu Poirier
387d844fe65SKir Kolyshkin struct sched_param {
388d844fe65SKir Kolyshkin int sched_priority;
389d844fe65SKir Kolyshkin };
390d844fe65SKir Kolyshkin
3911da177e4SLinus Torvalds struct sched_info {
3927f5f8e8dSIngo Molnar #ifdef CONFIG_SCHED_INFO
3935eca1c10SIngo Molnar /* Cumulative counters: */
3941da177e4SLinus Torvalds
3955eca1c10SIngo Molnar /* # of times we have run on this CPU: */
3965eca1c10SIngo Molnar unsigned long pcount;
3975eca1c10SIngo Molnar
3985eca1c10SIngo Molnar /* Time spent waiting on a runqueue: */
3995eca1c10SIngo Molnar unsigned long long run_delay;
4005eca1c10SIngo Molnar
401658eb5abSWang Yaxin /* Max time spent waiting on a runqueue: */
402658eb5abSWang Yaxin unsigned long long max_run_delay;
403658eb5abSWang Yaxin
404f65c64f3SWang Yaxin /* Min time spent waiting on a runqueue: */
405f65c64f3SWang Yaxin unsigned long long min_run_delay;
406f65c64f3SWang Yaxin
4075eca1c10SIngo Molnar /* Timestamps: */
4085eca1c10SIngo Molnar
4095eca1c10SIngo Molnar /* When did we last run on a CPU? */
4105eca1c10SIngo Molnar unsigned long long last_arrival;
4115eca1c10SIngo Molnar
4125eca1c10SIngo Molnar /* When were we last queued to run? */
4135eca1c10SIngo Molnar unsigned long long last_queued;
4145eca1c10SIngo Molnar
415f6db8347SNaveen N. Rao #endif /* CONFIG_SCHED_INFO */
4167f5f8e8dSIngo Molnar };
4171da177e4SLinus Torvalds
4181da177e4SLinus Torvalds /*
4196ecdd749SYuyang Du * Integer metrics need fixed point arithmetic, e.g., sched/fair
4206ecdd749SYuyang Du * has a few: load, load_avg, util_avg, freq, and capacity.
4216ecdd749SYuyang Du *
4226ecdd749SYuyang Du * We define a basic fixed point arithmetic range, and then formalize
4236ecdd749SYuyang Du * all these metrics based on that basic range.
4246ecdd749SYuyang Du */
4256ecdd749SYuyang Du # define SCHED_FIXEDPOINT_SHIFT 10
4266ecdd749SYuyang Du # define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
4276ecdd749SYuyang Du
42869842cbaSPatrick Bellasi /* Increase resolution of cpu_capacity calculations */
42969842cbaSPatrick Bellasi # define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
43069842cbaSPatrick Bellasi # define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
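/*
 * Worked example of the fixed point convention above (the numbers follow
 * directly from the definitions):
 *
 *	SCHED_FIXEDPOINT_SCALE == 1 << 10 == 1024
 *	SCHED_CAPACITY_SCALE   == 1024
 *
 * so a utilization of 50% is stored as 512, and a percentage converts to this
 * range as:
 *
 *	util = pct * SCHED_CAPACITY_SCALE / 100;
 */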
43169842cbaSPatrick Bellasi
43220b8a59fSIngo Molnar struct load_weight {
4339dbdb155SPeter Zijlstra unsigned long weight;
4349dbdb155SPeter Zijlstra u32 inv_weight;
43520b8a59fSIngo Molnar };
43620b8a59fSIngo Molnar
4379d89c257SYuyang Du /*
4389f683953SVincent Guittot * The load/runnable/util_avg accumulates an infinite geometric series
4390dacee1bSVincent Guittot * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
4407b595334SYuyang Du *
4417b595334SYuyang Du * [load_avg definition]
4427b595334SYuyang Du *
4437b595334SYuyang Du * load_avg = runnable% * scale_load_down(load)
4447b595334SYuyang Du *
4459f683953SVincent Guittot * [runnable_avg definition]
4469f683953SVincent Guittot *
4479f683953SVincent Guittot * runnable_avg = runnable% * SCHED_CAPACITY_SCALE
4487b595334SYuyang Du *
4497b595334SYuyang Du * [util_avg definition]
4507b595334SYuyang Du *
4517b595334SYuyang Du * util_avg = running% * SCHED_CAPACITY_SCALE
4527b595334SYuyang Du *
4539f683953SVincent Guittot * where runnable% is the time ratio that a sched_entity is runnable and
4549f683953SVincent Guittot * running% the time ratio that a sched_entity is running.
4557b595334SYuyang Du *
4569f683953SVincent Guittot * For cfs_rq, they are the aggregated values of all runnable and blocked
4579f683953SVincent Guittot * sched_entities.
4589f683953SVincent Guittot *
459c1b7b8d4S王文虎 * The load/runnable/util_avg don't directly factor in frequency scaling and CPU
4609f683953SVincent Guittot * capacity scaling. The scaling is done through the rq_clock_pelt that is used
4619f683953SVincent Guittot * for computing those signals (see update_rq_clock_pelt())
4627b595334SYuyang Du *
46323127296SVincent Guittot * N.B., the above ratios (runnable% and running%) themselves are in the
46423127296SVincent Guittot * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
46523127296SVincent Guittot * to as large a range as necessary. This is for example reflected by
46623127296SVincent Guittot * util_avg's SCHED_CAPACITY_SCALE.
4677b595334SYuyang Du *
4687b595334SYuyang Du * [Overflow issue]
4697b595334SYuyang Du *
4707b595334SYuyang Du * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
4717b595334SYuyang Du * with the highest load (=88761), always runnable on a single cfs_rq,
4727b595334SYuyang Du * and should not overflow as the number already hits PID_MAX_LIMIT.
4737b595334SYuyang Du *
4747b595334SYuyang Du * For all other cases (including 32-bit kernels), struct load_weight's
4757b595334SYuyang Du * weight will overflow first before we do, because:
4767b595334SYuyang Du *
4777b595334SYuyang Du * Max(load_avg) <= Max(load.weight)
4787b595334SYuyang Du *
4797b595334SYuyang Du * Then it is the load_weight's responsibility to consider overflow
4807b595334SYuyang Du * issues.
4819d89c257SYuyang Du */
4829d85f21cSPaul Turner struct sched_avg {
4835eca1c10SIngo Molnar u64 last_update_time;
4845eca1c10SIngo Molnar u64 load_sum;
4859f683953SVincent Guittot u64 runnable_sum;
4865eca1c10SIngo Molnar u32 util_sum;
4875eca1c10SIngo Molnar u32 period_contrib;
4885eca1c10SIngo Molnar unsigned long load_avg;
4899f683953SVincent Guittot unsigned long runnable_avg;
4905eca1c10SIngo Molnar unsigned long util_avg;
49111137d38SVincent Guittot unsigned int util_est;
492317d359dSPeter Zijlstra } ____cacheline_aligned;
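/*
 * Worked check of the overflow bound quoted in the comment above, assuming
 * 47742 is the maximum PELT accumulation of a single entity and 88761 the
 * largest load weight (nice -20):
 *
 *	2^64 / 47742 / 88761 ~= 4353082796 entities
 *
 * which is far larger than PID_MAX_LIMIT, hence "should not overflow".
 */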
4939d85f21cSPaul Turner
49411137d38SVincent Guittot /*
49511137d38SVincent Guittot * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
49611137d38SVincent Guittot * updates. When a task is dequeued, its util_est should not be updated if its
49711137d38SVincent Guittot * util_avg has not been updated in the meantime.
49811137d38SVincent Guittot * This information is mapped into the MSB bit of util_est at dequeue time.
49911137d38SVincent Guittot * Since max value of util_est for a task is 1024 (PELT util_avg for a task)
50011137d38SVincent Guittot * it is safe to use MSB.
50111137d38SVincent Guittot */
50211137d38SVincent Guittot #define UTIL_EST_WEIGHT_SHIFT 2
50311137d38SVincent Guittot #define UTIL_AVG_UNCHANGED 0x80000000
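/*
 * Readers therefore mask the flag out when they want the raw estimate; a
 * minimal sketch (mirroring the description above, not a specific in-tree
 * helper):
 *
 *	unsigned int util = READ_ONCE(p->se.avg.util_est) & ~UTIL_AVG_UNCHANGED;
 */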
50411137d38SVincent Guittot
50541acab88SLucas De Marchi struct sched_statistics {
5067f5f8e8dSIngo Molnar #ifdef CONFIG_SCHEDSTATS
50794c18227SIngo Molnar u64 wait_start;
50894c18227SIngo Molnar u64 wait_max;
5096d082592SArjan van de Ven u64 wait_count;
5106d082592SArjan van de Ven u64 wait_sum;
5118f0dfc34SArjan van de Ven u64 iowait_count;
5128f0dfc34SArjan van de Ven u64 iowait_sum;
51394c18227SIngo Molnar
51494c18227SIngo Molnar u64 sleep_start;
51520b8a59fSIngo Molnar u64 sleep_max;
51694c18227SIngo Molnar s64 sum_sleep_runtime;
51794c18227SIngo Molnar
51894c18227SIngo Molnar u64 block_start;
51920b8a59fSIngo Molnar u64 block_max;
520847fc0cdSYafang Shao s64 sum_block_runtime;
521847fc0cdSYafang Shao
5225d69eca5SPeter Zijlstra s64 exec_max;
523eba1ed4bSIngo Molnar u64 slice_max;
524cc367732SIngo Molnar
525cc367732SIngo Molnar u64 nr_migrations_cold;
526cc367732SIngo Molnar u64 nr_failed_migrations_affine;
527cc367732SIngo Molnar u64 nr_failed_migrations_running;
528cc367732SIngo Molnar u64 nr_failed_migrations_hot;
529cc367732SIngo Molnar u64 nr_forced_migrations;
530cc367732SIngo Molnar
531cc367732SIngo Molnar u64 nr_wakeups;
532cc367732SIngo Molnar u64 nr_wakeups_sync;
533cc367732SIngo Molnar u64 nr_wakeups_migrate;
534cc367732SIngo Molnar u64 nr_wakeups_local;
535cc367732SIngo Molnar u64 nr_wakeups_remote;
536cc367732SIngo Molnar u64 nr_wakeups_affine;
537cc367732SIngo Molnar u64 nr_wakeups_affine_attempts;
538cc367732SIngo Molnar u64 nr_wakeups_passive;
539cc367732SIngo Molnar u64 nr_wakeups_idle;
5404feee7d1SJosh Don
5414feee7d1SJosh Don #ifdef CONFIG_SCHED_CORE
5424feee7d1SJosh Don u64 core_forceidle_sum;
54341acab88SLucas De Marchi #endif
5444feee7d1SJosh Don #endif /* CONFIG_SCHEDSTATS */
545ceeadb83SYafang Shao } ____cacheline_aligned;
54641acab88SLucas De Marchi
54741acab88SLucas De Marchi struct sched_entity {
5485eca1c10SIngo Molnar /* For load-balancing: */
5495eca1c10SIngo Molnar struct load_weight load;
55041acab88SLucas De Marchi struct rb_node run_node;
551147f3efaSPeter Zijlstra u64 deadline;
5522227a957SAbel Wu u64 min_vruntime;
553aef6987dSPeter Zijlstra u64 min_slice;
554147f3efaSPeter Zijlstra
55541acab88SLucas De Marchi struct list_head group_node;
55682e9d045SPeter Zijlstra unsigned char on_rq;
55782e9d045SPeter Zijlstra unsigned char sched_delayed;
55882e9d045SPeter Zijlstra unsigned char rel_deadline;
559857b158dSPeter Zijlstra unsigned char custom_slice;
56082e9d045SPeter Zijlstra /* hole */
56141acab88SLucas De Marchi
56241acab88SLucas De Marchi u64 exec_start;
56341acab88SLucas De Marchi u64 sum_exec_runtime;
56441acab88SLucas De Marchi u64 prev_sum_exec_runtime;
56586bfbb7cSPeter Zijlstra u64 vruntime;
56686bfbb7cSPeter Zijlstra s64 vlag;
567147f3efaSPeter Zijlstra u64 slice;
56841acab88SLucas De Marchi
56941acab88SLucas De Marchi u64 nr_migrations;
57041acab88SLucas De Marchi
57120b8a59fSIngo Molnar #ifdef CONFIG_FAIR_GROUP_SCHED
572fed14d45SPeter Zijlstra int depth;
57320b8a59fSIngo Molnar struct sched_entity *parent;
57420b8a59fSIngo Molnar /* rq on which this entity is (to be) queued: */
57520b8a59fSIngo Molnar struct cfs_rq *cfs_rq;
57620b8a59fSIngo Molnar /* rq "owned" by this entity/group: */
57720b8a59fSIngo Molnar struct cfs_rq *my_q;
5789f683953SVincent Guittot /* cached value of my_q->h_nr_running */
5799f683953SVincent Guittot unsigned long runnable_weight;
58020b8a59fSIngo Molnar #endif
5818bd75c77SClark Williams
582141965c7SAlex Shi #ifdef CONFIG_SMP
5835a107804SJiri Olsa /*
5845a107804SJiri Olsa * Per entity load average tracking.
5855a107804SJiri Olsa *
5865a107804SJiri Olsa * Put into separate cache line so it does not
5875a107804SJiri Olsa * collide with read-mostly values above.
5885a107804SJiri Olsa */
589317d359dSPeter Zijlstra struct sched_avg avg;
5909d85f21cSPaul Turner #endif
59120b8a59fSIngo Molnar };
59270b97a7fSIngo Molnar
593fa717060SPeter Zijlstra struct sched_rt_entity {
594fa717060SPeter Zijlstra struct list_head run_list;
59578f2c7dbSPeter Zijlstra unsigned long timeout;
59657d2aa00SYing Xue unsigned long watchdog_stamp;
597bee367edSRichard Kennedy unsigned int time_slice;
598ff77e468SPeter Zijlstra unsigned short on_rq;
599ff77e468SPeter Zijlstra unsigned short on_list;
6006f505b16SPeter Zijlstra
60158d6c2d7SPeter Zijlstra struct sched_rt_entity *back;
602052f1dc7SPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
6036f505b16SPeter Zijlstra struct sched_rt_entity *parent;
6046f505b16SPeter Zijlstra /* rq on which this entity is (to be) queued: */
6056f505b16SPeter Zijlstra struct rt_rq *rt_rq;
6066f505b16SPeter Zijlstra /* rq "owned" by this entity/group: */
6076f505b16SPeter Zijlstra struct rt_rq *my_q;
6086f505b16SPeter Zijlstra #endif
6093859a271SKees Cook } __randomize_layout;
610fa717060SPeter Zijlstra
61163ba8422SPeter Zijlstra typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
61263ba8422SPeter Zijlstra typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
61363ba8422SPeter Zijlstra
614aab03e05SDario Faggioli struct sched_dl_entity {
615aab03e05SDario Faggioli struct rb_node rb_node;
616aab03e05SDario Faggioli
617aab03e05SDario Faggioli /*
618aab03e05SDario Faggioli * Original scheduling parameters. Copied here from sched_attr
6194027d080Sxiaofeng.yan * during sched_setattr(), they will remain the same until
6204027d080Sxiaofeng.yan * the next sched_setattr().
621aab03e05SDario Faggioli */
6225eca1c10SIngo Molnar u64 dl_runtime; /* Maximum runtime for each instance */
6235eca1c10SIngo Molnar u64 dl_deadline; /* Relative deadline of each instance */
6245eca1c10SIngo Molnar u64 dl_period; /* Separation of two instances (period) */
62554d6d303SDaniel Bristot de Oliveira u64 dl_bw; /* dl_runtime / dl_period */
6263effcb42SDaniel Bristot de Oliveira u64 dl_density; /* dl_runtime / dl_deadline */
627aab03e05SDario Faggioli
628aab03e05SDario Faggioli /*
629aab03e05SDario Faggioli * Actual scheduling parameters. Initialized with the values above,
630dfcb245eSIngo Molnar * they are continuously updated during task execution. Note that
631aab03e05SDario Faggioli * the remaining runtime could be < 0 in case we are in overrun.
632aab03e05SDario Faggioli */
6335eca1c10SIngo Molnar s64 runtime; /* Remaining runtime for this instance */
6345eca1c10SIngo Molnar u64 deadline; /* Absolute deadline for this instance */
6355eca1c10SIngo Molnar unsigned int flags; /* Specifying the scheduler behaviour */
636aab03e05SDario Faggioli
637aab03e05SDario Faggioli /*
638aab03e05SDario Faggioli * Some bool flags:
639aab03e05SDario Faggioli *
640aab03e05SDario Faggioli * @dl_throttled tells if we exhausted the runtime. If so, the
641aab03e05SDario Faggioli * task has to wait for a replenishment to be performed at the
642aab03e05SDario Faggioli * next firing of dl_timer.
643aab03e05SDario Faggioli *
6445eca1c10SIngo Molnar * @dl_yielded tells if task gave up the CPU before consuming
6455bfd126eSJuri Lelli * all its available runtime during the last job.
646209a0cbdSLuca Abeni *
647209a0cbdSLuca Abeni * @dl_non_contending tells if the task is inactive while still
648209a0cbdSLuca Abeni * contributing to the active utilization. In other words, it
649209a0cbdSLuca Abeni * indicates if the inactive timer has been armed and its handler
650209a0cbdSLuca Abeni * has not been executed yet. This flag is useful to avoid race
651209a0cbdSLuca Abeni * conditions between the inactive timer handler and the wakeup
652209a0cbdSLuca Abeni * code.
65334be3930SJuri Lelli *
65434be3930SJuri Lelli * @dl_overrun tells if the task asked to be informed about runtime
65534be3930SJuri Lelli * overruns.
656f23c042cSDaniel Bristot de Oliveira *
657f23c042cSDaniel Bristot de Oliveira * @dl_server tells if this is a server entity.
658a110a81cSDaniel Bristot de Oliveira *
659a110a81cSDaniel Bristot de Oliveira * @dl_defer tells if this is a deferred or regular server. For
660a110a81cSDaniel Bristot de Oliveira * now only defer server exists.
661a110a81cSDaniel Bristot de Oliveira *
662a110a81cSDaniel Bristot de Oliveira * @dl_defer_armed tells if the deferrable server is waiting
663a110a81cSDaniel Bristot de Oliveira * for the replenishment timer to activate it.
664a110a81cSDaniel Bristot de Oliveira *
665b53127dbSVineeth Pillai (Google) * @dl_server_active tells if the dlserver is active (started).
666b53127dbSVineeth Pillai (Google) * dlserver is started on first cfs enqueue on an idle runqueue
667b53127dbSVineeth Pillai (Google) * and is stopped when a dequeue results in 0 cfs tasks on the
668b53127dbSVineeth Pillai (Google) * runqueue. In other words, dlserver is active only when the CPU's
669b53127dbSVineeth Pillai (Google) * runqueue has at least one cfs task.
670b53127dbSVineeth Pillai (Google) *
671a110a81cSDaniel Bristot de Oliveira * @dl_defer_running tells if the deferrable server is actually
672a110a81cSDaniel Bristot de Oliveira * running, skipping the defer phase.
673aab03e05SDario Faggioli */
674aa5222e9SDan Carpenter unsigned int dl_throttled : 1;
675aa5222e9SDan Carpenter unsigned int dl_yielded : 1;
676aa5222e9SDan Carpenter unsigned int dl_non_contending : 1;
67734be3930SJuri Lelli unsigned int dl_overrun : 1;
67863ba8422SPeter Zijlstra unsigned int dl_server : 1;
679b53127dbSVineeth Pillai (Google) unsigned int dl_server_active : 1;
680a110a81cSDaniel Bristot de Oliveira unsigned int dl_defer : 1;
681a110a81cSDaniel Bristot de Oliveira unsigned int dl_defer_armed : 1;
682a110a81cSDaniel Bristot de Oliveira unsigned int dl_defer_running : 1;
683aab03e05SDario Faggioli
684aab03e05SDario Faggioli /*
685aab03e05SDario Faggioli * Bandwidth enforcement timer. Each -deadline task has its
686aab03e05SDario Faggioli * own bandwidth to be enforced, thus we need one timer per task.
687aab03e05SDario Faggioli */
688aab03e05SDario Faggioli struct hrtimer dl_timer;
689209a0cbdSLuca Abeni
690209a0cbdSLuca Abeni /*
691209a0cbdSLuca Abeni * Inactive timer, responsible for decreasing the active utilization
692209a0cbdSLuca Abeni * at the "0-lag time". When a -deadline task blocks, it contributes
693209a0cbdSLuca Abeni * to GRUB's active utilization until the "0-lag time", hence a
694209a0cbdSLuca Abeni * timer is needed to decrease the active utilization at the correct
695209a0cbdSLuca Abeni * time.
696209a0cbdSLuca Abeni */
697209a0cbdSLuca Abeni struct hrtimer inactive_timer;
6982279f540SJuri Lelli
69963ba8422SPeter Zijlstra /*
70063ba8422SPeter Zijlstra * Bits for DL-server functionality. Also see the comment near
70163ba8422SPeter Zijlstra * dl_server_update().
70263ba8422SPeter Zijlstra *
70363ba8422SPeter Zijlstra * @rq the runqueue this server is for
70463ba8422SPeter Zijlstra *
70563ba8422SPeter Zijlstra * @server_has_tasks() returns true if @server_pick returns a
70663ba8422SPeter Zijlstra * runnable task.
70763ba8422SPeter Zijlstra */
70863ba8422SPeter Zijlstra struct rq *rq;
70963ba8422SPeter Zijlstra dl_server_has_tasks_f server_has_tasks;
710c8a85394SJoel Fernandes (Google) dl_server_pick_f server_pick_task;
71163ba8422SPeter Zijlstra
7122279f540SJuri Lelli #ifdef CONFIG_RT_MUTEXES
7132279f540SJuri Lelli /*
7142279f540SJuri Lelli * Priority Inheritance. When a DEADLINE scheduling entity is boosted
7152279f540SJuri Lelli * pi_se points to the donor, otherwise points to the dl_se it belongs
7162279f540SJuri Lelli * to (the original one/itself).
7172279f540SJuri Lelli */
7182279f540SJuri Lelli struct sched_dl_entity *pi_se;
7192279f540SJuri Lelli #endif
720aab03e05SDario Faggioli };
7218bd75c77SClark Williams
72269842cbaSPatrick Bellasi #ifdef CONFIG_UCLAMP_TASK
72369842cbaSPatrick Bellasi /* Number of utilization clamp buckets (shorter alias) */
72469842cbaSPatrick Bellasi #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
72569842cbaSPatrick Bellasi
72669842cbaSPatrick Bellasi /*
72769842cbaSPatrick Bellasi * Utilization clamp for a scheduling entity
72869842cbaSPatrick Bellasi * @value: clamp value "assigned" to a se
72969842cbaSPatrick Bellasi * @bucket_id: bucket index corresponding to the "assigned" value
730e8f14172SPatrick Bellasi * @active: the se is currently refcounted in a rq's bucket
731a509a7cdSPatrick Bellasi * @user_defined: the requested clamp value comes from user-space
73269842cbaSPatrick Bellasi *
73369842cbaSPatrick Bellasi * The bucket_id is the index of the clamp bucket matching the clamp value
73469842cbaSPatrick Bellasi * which is pre-computed and stored to avoid expensive integer divisions from
73569842cbaSPatrick Bellasi * the fast path.
736e8f14172SPatrick Bellasi *
737e8f14172SPatrick Bellasi * The active bit is set whenever a task has got an "effective" value assigned,
738e8f14172SPatrick Bellasi * which can be different from the clamp value "requested" from user-space.
739e8f14172SPatrick Bellasi * This way we know that a task is refcounted in the rq's bucket corresponding
740e8f14172SPatrick Bellasi * to the "effective" bucket_id.
741a509a7cdSPatrick Bellasi *
742a509a7cdSPatrick Bellasi * The user_defined bit is set whenever a task has got a task-specific clamp
743a509a7cdSPatrick Bellasi * value requested from userspace, i.e. the system defaults apply to this task
744a509a7cdSPatrick Bellasi * just as a restriction. This allows default clamps to be relaxed when a less
745a509a7cdSPatrick Bellasi * restrictive task-specific value has been requested, thus making it possible
746a509a7cdSPatrick Bellasi * to implement a "nice" semantic. For example, a task running with a 20%
747a509a7cdSPatrick Bellasi * default boost can still drop its own boosting to 0%.
74869842cbaSPatrick Bellasi */
74969842cbaSPatrick Bellasi struct uclamp_se {
75069842cbaSPatrick Bellasi unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
75169842cbaSPatrick Bellasi unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
752e8f14172SPatrick Bellasi unsigned int active : 1;
753a509a7cdSPatrick Bellasi unsigned int user_defined : 1;
75469842cbaSPatrick Bellasi };
75569842cbaSPatrick Bellasi #endif /* CONFIG_UCLAMP_TASK */
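/*
 * Sketch of the value -> bucket_id mapping described above (assumption:
 * buckets split the [0..SCHED_CAPACITY_SCALE] range evenly; this is an
 * illustration, not the in-tree helper):
 *
 *	delta     = DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS);
 *	bucket_id = min_t(unsigned int, value / delta, UCLAMP_BUCKETS - 1);
 */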
75669842cbaSPatrick Bellasi
7571d082fd0SPaul E. McKenney union rcu_special {
7581d082fd0SPaul E. McKenney struct {
7598203d6d0SPaul E. McKenney u8 blocked;
7608203d6d0SPaul E. McKenney u8 need_qs;
76105f41571SPaul E. McKenney u8 exp_hint; /* Hint for performance. */
762276c4104SPaul E. McKenney u8 need_mb; /* Readers need smp_mb(). */
7638203d6d0SPaul E. McKenney } b; /* Bits. */
76405f41571SPaul E. McKenney u32 s; /* Set of bits. */
7651d082fd0SPaul E. McKenney };
76686848966SPaul E. McKenney
7678dc85d54SPeter Zijlstra enum perf_event_task_context {
7688dc85d54SPeter Zijlstra perf_invalid_context = -1,
7698dc85d54SPeter Zijlstra perf_hw_context = 0,
77089a1e187SPeter Zijlstra perf_sw_context,
7718dc85d54SPeter Zijlstra perf_nr_task_contexts,
7728dc85d54SPeter Zijlstra };
7738dc85d54SPeter Zijlstra
7740d40a6d8SSebastian Andrzej Siewior /*
7750d40a6d8SSebastian Andrzej Siewior * Number of contexts where an event can trigger:
7760d40a6d8SSebastian Andrzej Siewior * task, softirq, hardirq, nmi.
7770d40a6d8SSebastian Andrzej Siewior */
7780d40a6d8SSebastian Andrzej Siewior #define PERF_NR_CONTEXTS 4
7790d40a6d8SSebastian Andrzej Siewior
780eb61baf6SIngo Molnar struct wake_q_node {
781eb61baf6SIngo Molnar struct wake_q_node *next;
782eb61baf6SIngo Molnar };
783eb61baf6SIngo Molnar
7845fbda3ecSThomas Gleixner struct kmap_ctrl {
7855fbda3ecSThomas Gleixner #ifdef CONFIG_KMAP_LOCAL
7865fbda3ecSThomas Gleixner int idx;
7875fbda3ecSThomas Gleixner pte_t pteval[KM_MAX_IDX];
7885fbda3ecSThomas Gleixner #endif
7895fbda3ecSThomas Gleixner };
7905fbda3ecSThomas Gleixner
7911da177e4SLinus Torvalds struct task_struct {
792c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
793c65eacbeSAndy Lutomirski /*
794c65eacbeSAndy Lutomirski * For reasons of header soup (see current_thread_info()), this
795c65eacbeSAndy Lutomirski * must be the first element of task_struct.
796c65eacbeSAndy Lutomirski */
797c65eacbeSAndy Lutomirski struct thread_info thread_info;
798c65eacbeSAndy Lutomirski #endif
7992f064a59SPeter Zijlstra unsigned int __state;
80029e48ce8SKees Cook
8015f220be2SThomas Gleixner /* saved state for "spinlock sleepers" */
8025f220be2SThomas Gleixner unsigned int saved_state;
8035f220be2SThomas Gleixner
80429e48ce8SKees Cook /*
80529e48ce8SKees Cook * This begins the randomizable portion of task_struct. Only
80629e48ce8SKees Cook * scheduling-critical items should be added above here.
80729e48ce8SKees Cook */
80829e48ce8SKees Cook randomized_struct_fields_start
80929e48ce8SKees Cook
810f7e4217bSRoman Zippel void *stack;
811ec1d2819SElena Reshetova refcount_t usage;
8125eca1c10SIngo Molnar /* Per task flags (PF_*), defined further below: */
8135eca1c10SIngo Molnar unsigned int flags;
81497dc32cdSWilliam Cohen unsigned int ptrace;
8151da177e4SLinus Torvalds
81622d407b1SSuren Baghdasaryan #ifdef CONFIG_MEM_ALLOC_PROFILING
81722d407b1SSuren Baghdasaryan struct alloc_tag *alloc_tag;
81822d407b1SSuren Baghdasaryan #endif
81922d407b1SSuren Baghdasaryan
8202dd73a4fSPeter Williams #ifdef CONFIG_SMP
8213ca7a440SPeter Zijlstra int on_cpu;
8228c4890d1SPeter Zijlstra struct __call_single_node wake_entry;
82363b0e9edSMike Galbraith unsigned int wakee_flips;
82462470419SMichael Wang unsigned long wakee_flip_decay_ts;
82563b0e9edSMike Galbraith struct task_struct *last_wakee;
826ac66f547SPeter Zijlstra
82732e839ddSMel Gorman /*
82832e839ddSMel Gorman * recent_used_cpu is initially set as the last CPU used by a task
82932e839ddSMel Gorman * that wakes affine another task. Waker/wakee relationships can
83032e839ddSMel Gorman * push tasks around a CPU where each wakeup moves to the next one.
83132e839ddSMel Gorman * Tracking a recently used CPU allows a quick search for a recently
83232e839ddSMel Gorman * used CPU that may be idle.
83332e839ddSMel Gorman */
83432e839ddSMel Gorman int recent_used_cpu;
835ac66f547SPeter Zijlstra int wake_cpu;
8364866cde0SNick Piggin #endif
837fd2f4419SPeter Zijlstra int on_rq;
83850e645a8SIngo Molnar
8395eca1c10SIngo Molnar int prio;
8405eca1c10SIngo Molnar int static_prio;
8415eca1c10SIngo Molnar int normal_prio;
842c7aceabaSRichard Kennedy unsigned int rt_priority;
8435eca1c10SIngo Molnar
84420b8a59fSIngo Molnar struct sched_entity se;
845fa717060SPeter Zijlstra struct sched_rt_entity rt;
8468a311c74SPeter Zijlstra struct sched_dl_entity dl;
84763ba8422SPeter Zijlstra struct sched_dl_entity *dl_server;
848f0e1a064STejun Heo #ifdef CONFIG_SCHED_CLASS_EXT
849f0e1a064STejun Heo struct sched_ext_entity scx;
850f0e1a064STejun Heo #endif
851804bccbaSKees Cook const struct sched_class *sched_class;
8528a311c74SPeter Zijlstra
8538a311c74SPeter Zijlstra #ifdef CONFIG_SCHED_CORE
8548a311c74SPeter Zijlstra struct rb_node core_node;
8558a311c74SPeter Zijlstra unsigned long core_cookie;
856d2dfa17bSPeter Zijlstra unsigned int core_occupation;
8578a311c74SPeter Zijlstra #endif
8588a311c74SPeter Zijlstra
8598323f26cSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED
8608323f26cSPeter Zijlstra struct task_group *sched_task_group;
8618323f26cSPeter Zijlstra #endif
8621da177e4SLinus Torvalds
86322d407b1SSuren Baghdasaryan
86469842cbaSPatrick Bellasi #ifdef CONFIG_UCLAMP_TASK
86513685c4aSQais Yousef /*
86613685c4aSQais Yousef * Clamp values requested for a scheduling entity.
86713685c4aSQais Yousef * Must be updated with task_rq_lock() held.
86813685c4aSQais Yousef */
869e8f14172SPatrick Bellasi struct uclamp_se uclamp_req[UCLAMP_CNT];
87013685c4aSQais Yousef /*
87113685c4aSQais Yousef * Effective clamp values used for a scheduling entity.
87213685c4aSQais Yousef * Must be updated with task_rq_lock() held.
87313685c4aSQais Yousef */
87469842cbaSPatrick Bellasi struct uclamp_se uclamp[UCLAMP_CNT];
87569842cbaSPatrick Bellasi #endif
87669842cbaSPatrick Bellasi
877ceeadb83SYafang Shao struct sched_statistics stats;
878ceeadb83SYafang Shao
879e107be36SAvi Kivity #ifdef CONFIG_PREEMPT_NOTIFIERS
8805eca1c10SIngo Molnar /* List of struct preempt_notifier: */
881e107be36SAvi Kivity struct hlist_head preempt_notifiers;
882e107be36SAvi Kivity #endif
883e107be36SAvi Kivity
8846c5c9341SAlexey Dobriyan #ifdef CONFIG_BLK_DEV_IO_TRACE
8852056a782SJens Axboe unsigned int btrace_seq;
8866c5c9341SAlexey Dobriyan #endif
8871da177e4SLinus Torvalds
88897dc32cdSWilliam Cohen unsigned int policy;
88922d56074SQais Yousef unsigned long max_allowed_capacity;
89029baa747SPeter Zijlstra int nr_cpus_allowed;
8913bd37062SSebastian Andrzej Siewior const cpumask_t *cpus_ptr;
892b90ca8baSWill Deacon cpumask_t *user_cpus_ptr;
8933bd37062SSebastian Andrzej Siewior cpumask_t cpus_mask;
8946d337eabSPeter Zijlstra void *migration_pending;
89574d862b6SThomas Gleixner #ifdef CONFIG_SMP
896a7c81556SPeter Zijlstra unsigned short migration_disabled;
897af449901SPeter Zijlstra #endif
898a7c81556SPeter Zijlstra unsigned short migration_flags;
8991da177e4SLinus Torvalds
900a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
901e260be67SPaul E. McKenney int rcu_read_lock_nesting;
9021d082fd0SPaul E. McKenney union rcu_special rcu_read_unlock_special;
903f41d911fSPaul E. McKenney struct list_head rcu_node_entry;
904a57eb940SPaul E. McKenney struct rcu_node *rcu_blocked_node;
90528f6569aSPranith Kumar #endif /* #ifdef CONFIG_PREEMPT_RCU */
9065eca1c10SIngo Molnar
9078315f422SPaul E. McKenney #ifdef CONFIG_TASKS_RCU
9088315f422SPaul E. McKenney unsigned long rcu_tasks_nvcsw;
909ccdd29ffSPaul E. McKenney u8 rcu_tasks_holdout;
910ccdd29ffSPaul E. McKenney u8 rcu_tasks_idx;
911176f8f7aSPaul E. McKenney int rcu_tasks_idle_cpu;
912ccdd29ffSPaul E. McKenney struct list_head rcu_tasks_holdout_list;
913bfe93930SPaul E. McKenney int rcu_tasks_exit_cpu;
914bfe93930SPaul E. McKenney struct list_head rcu_tasks_exit_list;
9158315f422SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_RCU */
916e260be67SPaul E. McKenney
917d5f177d3SPaul E. McKenney #ifdef CONFIG_TASKS_TRACE_RCU
918d5f177d3SPaul E. McKenney int trc_reader_nesting;
919d5f177d3SPaul E. McKenney int trc_ipi_to_cpu;
920276c4104SPaul E. McKenney union rcu_special trc_reader_special;
921d5f177d3SPaul E. McKenney struct list_head trc_holdout_list;
922434c9eefSPaul E. McKenney struct list_head trc_blkd_node;
923434c9eefSPaul E. McKenney int trc_blkd_cpu;
924d5f177d3SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
925d5f177d3SPaul E. McKenney
9261da177e4SLinus Torvalds struct sched_info sched_info;
9271da177e4SLinus Torvalds
9281da177e4SLinus Torvalds struct list_head tasks;
929806c09a7SDario Faggioli #ifdef CONFIG_SMP
930917b627dSGregory Haskins struct plist_node pushable_tasks;
9311baca4ceSJuri Lelli struct rb_node pushable_dl_tasks;
932806c09a7SDario Faggioli #endif
9331da177e4SLinus Torvalds
9345eca1c10SIngo Molnar struct mm_struct *mm;
9355eca1c10SIngo Molnar struct mm_struct *active_mm;
9362b69987bSKent Overstreet struct address_space *faults_disabled_mapping;
937314ff785SIngo Molnar
93897dc32cdSWilliam Cohen int exit_state;
9395eca1c10SIngo Molnar int exit_code;
9405eca1c10SIngo Molnar int exit_signal;
9415eca1c10SIngo Molnar /* The signal sent when the parent dies: */
9425eca1c10SIngo Molnar int pdeath_signal;
9435eca1c10SIngo Molnar /* JOBCTL_*, siglock protected: */
9445eca1c10SIngo Molnar unsigned long jobctl;
9459b89f6baSAndrei Epure
9465eca1c10SIngo Molnar /* Used for emulating ABI behavior of previous Linux versions: */
94797dc32cdSWilliam Cohen unsigned int personality;
9489b89f6baSAndrei Epure
9495eca1c10SIngo Molnar /* Scheduler bits, serialized by scheduler locks: */
950ca94c442SLennart Poettering unsigned sched_reset_on_fork:1;
951a8e4f2eaSPeter Zijlstra unsigned sched_contributes_to_load:1;
952ff303e66SPeter Zijlstra unsigned sched_migrated:1;
953a430d99eSPeter Zijlstra unsigned sched_task_hot:1;
954eb414681SJohannes Weiner
9555eca1c10SIngo Molnar /* Force alignment to the next boundary: */
9565eca1c10SIngo Molnar unsigned :0;
957be958bdcSPeter Zijlstra
9585eca1c10SIngo Molnar /* Unserialized, strictly 'current' */
9595eca1c10SIngo Molnar
960f97bb527SPeter Zijlstra /*
961f97bb527SPeter Zijlstra * This field must not be in the scheduler word above due to wakelist
962f97bb527SPeter Zijlstra * queueing no longer being serialized by p->on_cpu. However:
963f97bb527SPeter Zijlstra *
964f97bb527SPeter Zijlstra * p->XXX = X; ttwu()
965f97bb527SPeter Zijlstra * schedule() if (p->on_rq && ..) // false
966f97bb527SPeter Zijlstra * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true
967f97bb527SPeter Zijlstra * deactivate_task() ttwu_queue_wakelist())
968f97bb527SPeter Zijlstra * p->on_rq = 0; p->sched_remote_wakeup = Y;
969f97bb527SPeter Zijlstra *
970f97bb527SPeter Zijlstra * guarantees all stores of 'current' are visible before
971f97bb527SPeter Zijlstra * ->sched_remote_wakeup gets used, so it can be in this word.
972f97bb527SPeter Zijlstra */
973f97bb527SPeter Zijlstra unsigned sched_remote_wakeup:1;
9746b596e62SPeter Zijlstra #ifdef CONFIG_RT_MUTEXES
9756b596e62SPeter Zijlstra unsigned sched_rt_mutex:1;
9766b596e62SPeter Zijlstra #endif
977f97bb527SPeter Zijlstra
97890383cc0SKees Cook /* Bit to tell TOMOYO we're in execve(): */
9795eca1c10SIngo Molnar unsigned in_execve:1;
980be958bdcSPeter Zijlstra unsigned in_iowait:1;
9815eca1c10SIngo Molnar #ifndef TIF_RESTORE_SIGMASK
9827e781418SAndy Lutomirski unsigned restore_sigmask:1;
9837e781418SAndy Lutomirski #endif
9841419ff98SRoman Gushchin #ifdef CONFIG_MEMCG_V1
98529ef680aSMichal Hocko unsigned in_user_fault:1;
986127424c8SJohannes Weiner #endif
987ec1c86b2SYu Zhao #ifdef CONFIG_LRU_GEN
988ec1c86b2SYu Zhao /* whether the LRU algorithm may apply to this access */
989ec1c86b2SYu Zhao unsigned in_lru_fault:1;
990ec1c86b2SYu Zhao #endif
991ff303e66SPeter Zijlstra #ifdef CONFIG_COMPAT_BRK
992ff303e66SPeter Zijlstra unsigned brk_randomized:1;
993ff303e66SPeter Zijlstra #endif
99477f88796STejun Heo #ifdef CONFIG_CGROUPS
99577f88796STejun Heo /* disallow userland-initiated cgroup migration */
99677f88796STejun Heo unsigned no_cgroup_migration:1;
99776f969e8SRoman Gushchin /* task is frozen/stopped (used by the cgroup freezer) */
99876f969e8SRoman Gushchin unsigned frozen:1;
99977f88796STejun Heo #endif
1000d09d8df3SJosef Bacik #ifdef CONFIG_BLK_CGROUP
1001d09d8df3SJosef Bacik unsigned use_memdelay:1;
1002d09d8df3SJosef Bacik #endif
10031066d1b6SYafang Shao #ifdef CONFIG_PSI
10041066d1b6SYafang Shao /* Stalled due to lack of memory */
10051066d1b6SYafang Shao unsigned in_memstall:1;
10061066d1b6SYafang Shao #endif
10078e9b16c4SSergei Trofimovich #ifdef CONFIG_PAGE_OWNER
10088e9b16c4SSergei Trofimovich /* Used by page_owner=on to detect recursion in page tracking. */
10098e9b16c4SSergei Trofimovich unsigned in_page_owner:1;
10108e9b16c4SSergei Trofimovich #endif
1011b542e383SThomas Gleixner #ifdef CONFIG_EVENTFD
1012b542e383SThomas Gleixner /* Recursion prevention for eventfd_signal() */
10139f0deaa1SDylan Yudaken unsigned in_eventfd:1;
1014b542e383SThomas Gleixner #endif
10158f23f5dbSJason Gunthorpe #ifdef CONFIG_ARCH_HAS_CPU_PASID
1016a3d29e82SPeter Zijlstra unsigned pasid_activated:1;
1017a3d29e82SPeter Zijlstra #endif
1018350afa8aSRavi Bangoria #ifdef CONFIG_X86_BUS_LOCK_DETECT
1019b041b525STony Luck unsigned reported_split_lock:1;
1020b041b525STony Luck #endif
1021aa1cf99bSYang Yang #ifdef CONFIG_TASK_DELAY_ACCT
1022aa1cf99bSYang Yang /* delay due to memory thrashing */
1023aa1cf99bSYang Yang unsigned in_thrashing:1;
1024aa1cf99bSYang Yang #endif
1025ecefbc09SSebastian Andrzej Siewior #ifdef CONFIG_PREEMPT_RT
1026ecefbc09SSebastian Andrzej Siewior struct netdev_xmit net_xmit;
1027ecefbc09SSebastian Andrzej Siewior #endif
10285eca1c10SIngo Molnar unsigned long atomic_flags; /* Flags requiring atomic access. */
10291d4457f9SKees Cook
1030f56141e3SAndy Lutomirski struct restart_block restart_block;
1031f56141e3SAndy Lutomirski
10321da177e4SLinus Torvalds pid_t pid;
10331da177e4SLinus Torvalds pid_t tgid;
10340a425405SArjan van de Ven
1035050e9baaSLinus Torvalds #ifdef CONFIG_STACKPROTECTOR
10365eca1c10SIngo Molnar /* Canary value for the -fstack-protector GCC feature: */
10370a425405SArjan van de Ven unsigned long stack_canary;
10381314562aSHiroshi Shimamoto #endif
10391da177e4SLinus Torvalds /*
10405eca1c10SIngo Molnar * Pointers to the (original) parent process, youngest child, younger sibling,
10411da177e4SLinus Torvalds * older sibling, respectively. (p->father can be replaced with
1042f470021aSRoland McGrath * p->real_parent->pid)
10431da177e4SLinus Torvalds */
10445eca1c10SIngo Molnar
10455eca1c10SIngo Molnar /* Real parent process: */
10465eca1c10SIngo Molnar struct task_struct __rcu *real_parent;
10475eca1c10SIngo Molnar
10485eca1c10SIngo Molnar /* Recipient of SIGCHLD, wait4() reports: */
10495eca1c10SIngo Molnar struct task_struct __rcu *parent;
10501da177e4SLinus Torvalds
1051f470021aSRoland McGrath /*
10525eca1c10SIngo Molnar * Children/sibling form the list of natural children:
10535eca1c10SIngo Molnar */
10545eca1c10SIngo Molnar struct list_head children;
10555eca1c10SIngo Molnar struct list_head sibling;
10565eca1c10SIngo Molnar struct task_struct *group_leader;
10575eca1c10SIngo Molnar
10585eca1c10SIngo Molnar /*
10595eca1c10SIngo Molnar * 'ptraced' is the list of tasks this task is using ptrace() on.
10605eca1c10SIngo Molnar *
1061f470021aSRoland McGrath * This includes both natural children and PTRACE_ATTACH targets.
10625eca1c10SIngo Molnar * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
1063f470021aSRoland McGrath */
1064f470021aSRoland McGrath struct list_head ptraced;
1065f470021aSRoland McGrath struct list_head ptrace_entry;
1066f470021aSRoland McGrath
10671da177e4SLinus Torvalds /* PID/PID hash table linkage. */
10682c470475SEric W. Biederman struct pid *thread_pid;
10692c470475SEric W. Biederman struct hlist_node pid_links[PIDTYPE_MAX];
10700c740d0aSOleg Nesterov struct list_head thread_node;
10711da177e4SLinus Torvalds
10725eca1c10SIngo Molnar struct completion *vfork_done;
10731da177e4SLinus Torvalds
10745eca1c10SIngo Molnar /* CLONE_CHILD_SETTID: */
10755eca1c10SIngo Molnar int __user *set_child_tid;
10765eca1c10SIngo Molnar
10775eca1c10SIngo Molnar /* CLONE_CHILD_CLEARTID: */
10785eca1c10SIngo Molnar int __user *clear_child_tid;
10795eca1c10SIngo Molnar
1080e32cf5dfSEric W. Biederman /* PF_KTHREAD | PF_IO_WORKER */
1081e32cf5dfSEric W. Biederman void *worker_private;
10823bfe6106SJens Axboe
10835eca1c10SIngo Molnar u64 utime;
10845eca1c10SIngo Molnar u64 stime;
108540565b5aSStanislaw Gruszka #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
10865eca1c10SIngo Molnar u64 utimescaled;
10875eca1c10SIngo Molnar u64 stimescaled;
108840565b5aSStanislaw Gruszka #endif
108916a6d9beSFrederic Weisbecker u64 gtime;
10909d7fb042SPeter Zijlstra struct prev_cputime prev_cputime;
10916a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1092bac5b6b6SFrederic Weisbecker struct vtime vtime;
10936a61671bSFrederic Weisbecker #endif
1094d027d45dSFrederic Weisbecker
1095d027d45dSFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL
1096f009a7a7SFrederic Weisbecker atomic_t tick_dep_mask;
1097d027d45dSFrederic Weisbecker #endif
10985eca1c10SIngo Molnar /* Context switch counts: */
10995eca1c10SIngo Molnar unsigned long nvcsw;
11005eca1c10SIngo Molnar unsigned long nivcsw;
11015eca1c10SIngo Molnar
11025eca1c10SIngo Molnar /* Monotonic time in nsecs: */
11035eca1c10SIngo Molnar u64 start_time;
11045eca1c10SIngo Molnar
11055eca1c10SIngo Molnar /* Boot based time in nsecs: */
1106cf25e24dSPeter Zijlstra u64 start_boottime;
11075eca1c10SIngo Molnar
11085eca1c10SIngo Molnar /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
11095eca1c10SIngo Molnar unsigned long min_flt;
11105eca1c10SIngo Molnar unsigned long maj_flt;
11111da177e4SLinus Torvalds
11122b69942fSThomas Gleixner /* Empty if CONFIG_POSIX_CPUTIMERS=n */
11132b69942fSThomas Gleixner struct posix_cputimers posix_cputimers;
11141da177e4SLinus Torvalds
11151fb497ddSThomas Gleixner #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
11161fb497ddSThomas Gleixner struct posix_cputimers_work posix_cputimers_work;
11171fb497ddSThomas Gleixner #endif
11181fb497ddSThomas Gleixner
11195eca1c10SIngo Molnar /* Process credentials: */
11205eca1c10SIngo Molnar
11215eca1c10SIngo Molnar /* Tracer's credentials at attach: */
11225eca1c10SIngo Molnar const struct cred __rcu *ptracer_cred;
11235eca1c10SIngo Molnar
11245eca1c10SIngo Molnar /* Objective and real subjective task credentials (COW): */
11255eca1c10SIngo Molnar const struct cred __rcu *real_cred;
11265eca1c10SIngo Molnar
11275eca1c10SIngo Molnar /* Effective (overridable) subjective task credentials (COW): */
11285eca1c10SIngo Molnar const struct cred __rcu *cred;
11295eca1c10SIngo Molnar
11307743c48eSDavid Howells #ifdef CONFIG_KEYS
11317743c48eSDavid Howells /* Cached requested key. */
11327743c48eSDavid Howells struct key *cached_requested_key;
11337743c48eSDavid Howells #endif
11347743c48eSDavid Howells
11355eca1c10SIngo Molnar /*
11365eca1c10SIngo Molnar * executable name, excluding path.
11375eca1c10SIngo Molnar *
11384cc0473dSYafang Shao * - normally initialized in begin_new_exec()
11394cc0473dSYafang Shao * - set it with set_task_comm()
11404cc0473dSYafang Shao * - strscpy_pad() to ensure it is always NUL-terminated and
11414cc0473dSYafang Shao * zero-padded
11424cc0473dSYafang Shao * - task_lock() to ensure the operation is atomic and the name is
11434cc0473dSYafang Shao * fully updated.
11445eca1c10SIngo Molnar */
11455eca1c10SIngo Molnar char comm[TASK_COMM_LEN];
11465eca1c10SIngo Molnar
1147756daf26SNeilBrown struct nameidata *nameidata;
11485eca1c10SIngo Molnar
11493d5b6fccSAlexey Dobriyan #ifdef CONFIG_SYSVIPC
11501da177e4SLinus Torvalds struct sysv_sem sysvsem;
1151ab602f79SJack Miller struct sysv_shm sysvshm;
11523d5b6fccSAlexey Dobriyan #endif
1153e162b39aSMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
115482a1fcb9SIngo Molnar unsigned long last_switch_count;
1155a2e51445SDmitry Vyukov unsigned long last_switch_time;
115682a1fcb9SIngo Molnar #endif
11575eca1c10SIngo Molnar /* Filesystem information: */
11581da177e4SLinus Torvalds struct fs_struct *fs;
11595eca1c10SIngo Molnar
11605eca1c10SIngo Molnar /* Open file information: */
11611da177e4SLinus Torvalds struct files_struct *files;
11625eca1c10SIngo Molnar
11630f212204SJens Axboe #ifdef CONFIG_IO_URING
11640f212204SJens Axboe struct io_uring_task *io_uring;
11650f212204SJens Axboe #endif
11660f212204SJens Axboe
11675eca1c10SIngo Molnar /* Namespaces: */
1168ab516013SSerge E. Hallyn struct nsproxy *nsproxy;
11695eca1c10SIngo Molnar
11705eca1c10SIngo Molnar /* Signal handlers: */
11711da177e4SLinus Torvalds struct signal_struct *signal;
1172913292c9SMadhuparna Bhowmik struct sighand_struct __rcu *sighand;
11735eca1c10SIngo Molnar sigset_t blocked;
11745eca1c10SIngo Molnar sigset_t real_blocked;
11755eca1c10SIngo Molnar /* Restored if set_restore_sigmask() was used: */
11765eca1c10SIngo Molnar sigset_t saved_sigmask;
11771da177e4SLinus Torvalds struct sigpending pending;
11781da177e4SLinus Torvalds unsigned long sas_ss_sp;
11791da177e4SLinus Torvalds size_t sas_ss_size;
11805eca1c10SIngo Molnar unsigned int sas_ss_flags;
11812e01fabeSOleg Nesterov
118267d12145SAl Viro struct callback_head *task_works;
1183e73f8959SOleg Nesterov
11844b7d248bSRichard Guy Briggs #ifdef CONFIG_AUDIT
1185bfef93a5SAl Viro #ifdef CONFIG_AUDITSYSCALL
11865f3d544fSRichard Guy Briggs struct audit_context *audit_context;
11875f3d544fSRichard Guy Briggs #endif
1188e1760bd5SEric W. Biederman kuid_t loginuid;
11894746ec5bSEric Paris unsigned int sessionid;
1190bfef93a5SAl Viro #endif
1191932ecebbSWill Drewry struct seccomp seccomp;
11921446e1dfSGabriel Krisman Bertazi struct syscall_user_dispatch syscall_dispatch;
11931da177e4SLinus Torvalds
11945eca1c10SIngo Molnar /* Thread group tracking: */
1195d1e7fd64SEric W. Biederman u64 parent_exec_id;
1196d1e7fd64SEric W. Biederman u64 self_exec_id;
11975eca1c10SIngo Molnar
11985eca1c10SIngo Molnar /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
11991da177e4SLinus Torvalds spinlock_t alloc_lock;
12001da177e4SLinus Torvalds
1201b29739f9SIngo Molnar /* Protection of the PI data structures: */
12021d615482SThomas Gleixner raw_spinlock_t pi_lock;
1203b29739f9SIngo Molnar
120476751049SPeter Zijlstra struct wake_q_node wake_q;
120576751049SPeter Zijlstra
120623f78d4aSIngo Molnar #ifdef CONFIG_RT_MUTEXES
12075eca1c10SIngo Molnar /* PI waiters blocked on a rt_mutex held by this task: */
1208a23ba907SDavidlohr Bueso struct rb_root_cached pi_waiters;
1209e96a7705SXunlei Pang /* Updated under owner's pi_lock and rq lock */
1210e96a7705SXunlei Pang struct task_struct *pi_top_task;
12115eca1c10SIngo Molnar /* Deadlock detection and priority inheritance handling: */
121223f78d4aSIngo Molnar struct rt_mutex_waiter *pi_blocked_on;
121323f78d4aSIngo Molnar #endif
121423f78d4aSIngo Molnar
1215408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES
12165eca1c10SIngo Molnar /* Mutex deadlock detection: */
1217408894eeSIngo Molnar struct mutex_waiter *blocked_on;
1218408894eeSIngo Molnar #endif
12195eca1c10SIngo Molnar
1220312364f3SDaniel Vetter #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1221312364f3SDaniel Vetter int non_block_count;
1222312364f3SDaniel Vetter #endif
1223312364f3SDaniel Vetter
1224de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS
12250584df9cSMarco Elver struct irqtrace_events irqtrace;
1226de8f5e4fSPeter Zijlstra unsigned int hardirq_threaded;
1227c86e9b98SPeter Zijlstra u64 hardirq_chain_key;
1228fa1452e8SHiroshi Shimamoto int softirqs_enabled;
1229de30a2b3SIngo Molnar int softirq_context;
123040db1739SSebastian Andrzej Siewior int irq_config;
1231de30a2b3SIngo Molnar #endif
1232728b478dSThomas Gleixner #ifdef CONFIG_PREEMPT_RT
1233728b478dSThomas Gleixner int softirq_disable_cnt;
1234728b478dSThomas Gleixner #endif
12355eca1c10SIngo Molnar
1236fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP
1237bdb9441eSPeter Zijlstra # define MAX_LOCK_DEPTH 48UL
1238fbb9ce95SIngo Molnar u64 curr_chain_key;
1239fbb9ce95SIngo Molnar int lockdep_depth;
1240fbb9ce95SIngo Molnar unsigned int lockdep_recursion;
1241c7aceabaSRichard Kennedy struct held_lock held_locks[MAX_LOCK_DEPTH];
1242fbb9ce95SIngo Molnar #endif
12435eca1c10SIngo Molnar
12445cf53f3cSElena Petrova #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1245c6d30853SAndrey Ryabinin unsigned int in_ubsan;
1246c6d30853SAndrey Ryabinin #endif
1247408894eeSIngo Molnar
12485eca1c10SIngo Molnar /* Journalling filesystem info: */
12491da177e4SLinus Torvalds void *journal_info;
12501da177e4SLinus Torvalds
12515eca1c10SIngo Molnar /* Stacked block device info: */
1252bddd87c7SAkinobu Mita struct bio_list *bio_list;
1253d89d8796SNeil Brown
12545eca1c10SIngo Molnar /* Stack plugging: */
125573c10101SJens Axboe struct blk_plug *plug;
125673c10101SJens Axboe
12575eca1c10SIngo Molnar /* VM state: */
12581da177e4SLinus Torvalds struct reclaim_state *reclaim_state;
12591da177e4SLinus Torvalds
12601da177e4SLinus Torvalds struct io_context *io_context;
12611da177e4SLinus Torvalds
12625e1f0f09SMel Gorman #ifdef CONFIG_COMPACTION
12635e1f0f09SMel Gorman struct capture_control *capture_control;
12645e1f0f09SMel Gorman #endif
12655eca1c10SIngo Molnar /* Ptrace state: */
12661da177e4SLinus Torvalds unsigned long ptrace_message;
1267ae7795bcSEric W. Biederman kernel_siginfo_t *last_siginfo;
12685eca1c10SIngo Molnar
12697c3ab738SAndrew Morton struct task_io_accounting ioac;
1270eb414681SJohannes Weiner #ifdef CONFIG_PSI
1271eb414681SJohannes Weiner /* Pressure stall state */
1272eb414681SJohannes Weiner unsigned int psi_flags;
1273eb414681SJohannes Weiner #endif
12745eca1c10SIngo Molnar #ifdef CONFIG_TASK_XACCT
12755eca1c10SIngo Molnar /* Accumulated RSS usage: */
12765eca1c10SIngo Molnar u64 acct_rss_mem1;
12775eca1c10SIngo Molnar /* Accumulated virtual memory usage: */
12785eca1c10SIngo Molnar u64 acct_vm_mem1;
12795eca1c10SIngo Molnar /* stime + utime since last update: */
12805eca1c10SIngo Molnar u64 acct_timexpd;
12811da177e4SLinus Torvalds #endif
12821da177e4SLinus Torvalds #ifdef CONFIG_CPUSETS
12835eca1c10SIngo Molnar /* Protected by ->alloc_lock: */
12845eca1c10SIngo Molnar nodemask_t mems_allowed;
12853b03706fSIngo Molnar /* Sequence number to catch updates: */
1286b7505861SAhmed S. Darwish seqcount_spinlock_t mems_allowed_seq;
1287825a46afSPaul Jackson int cpuset_mem_spread_rotor;
12881da177e4SLinus Torvalds #endif
1289ddbcc7e8SPaul Menage #ifdef CONFIG_CGROUPS
12905eca1c10SIngo Molnar /* Control Group info protected by css_set_lock: */
12912c392b8cSArnd Bergmann struct css_set __rcu *cgroups;
12925eca1c10SIngo Molnar /* cg_list protected by css_set_lock and tsk->alloc_lock: */
1293817929ecSPaul Menage struct list_head cg_list;
1294ddbcc7e8SPaul Menage #endif
1295e6d42931SJohannes Weiner #ifdef CONFIG_X86_CPU_RESCTRL
12960734ded1SVikas Shivappa u32 closid;
1297d6aaba61SVikas Shivappa u32 rmid;
1298e02737d5SFenghua Yu #endif
129942b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX
13000771dfefSIngo Molnar struct robust_list_head __user *robust_list;
130134f192c6SIngo Molnar #ifdef CONFIG_COMPAT
130234f192c6SIngo Molnar struct compat_robust_list_head __user *compat_robust_list;
130334f192c6SIngo Molnar #endif
1304c87e2837SIngo Molnar struct list_head pi_state_list;
1305c87e2837SIngo Molnar struct futex_pi_state *pi_state_cache;
13063f186d97SThomas Gleixner struct mutex futex_exit_mutex;
13073d4775dfSThomas Gleixner unsigned int futex_state;
130842b2dd0aSAlexey Dobriyan #endif
1309cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
13100d40a6d8SSebastian Andrzej Siewior u8 perf_recursion[PERF_NR_CONTEXTS];
1311bd275681SPeter Zijlstra struct perf_event_context *perf_event_ctxp;
1312cdd6c482SIngo Molnar struct mutex perf_event_mutex;
1313cdd6c482SIngo Molnar struct list_head perf_event_list;
1314a63eaf34SPaul Mackerras #endif
13158f47b187SThomas Gleixner #ifdef CONFIG_DEBUG_PREEMPT
13168f47b187SThomas Gleixner unsigned long preempt_disable_ip;
13178f47b187SThomas Gleixner #endif
1318c7aceabaSRichard Kennedy #ifdef CONFIG_NUMA
13195eca1c10SIngo Molnar /* Protected by alloc_lock: */
13205eca1c10SIngo Molnar struct mempolicy *mempolicy;
132145816682SVlastimil Babka short il_prev;
1322fa3bea4eSGregory Price u8 il_weight;
1323207205a2SEric Dumazet short pref_node_fork;
1324c7aceabaSRichard Kennedy #endif
1325cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
1326cbee9f88SPeter Zijlstra int numa_scan_seq;
1327cbee9f88SPeter Zijlstra unsigned int numa_scan_period;
1328598f0ec0SMel Gorman unsigned int numa_scan_period_max;
1329de1c9ce6SRik van Riel int numa_preferred_nid;
13306b9a7460SMel Gorman unsigned long numa_migrate_retry;
13315eca1c10SIngo Molnar /* Migration stamp: */
13325eca1c10SIngo Molnar u64 node_stamp;
13337e2703e6SRik van Riel u64 last_task_numa_placement;
13347e2703e6SRik van Riel u64 last_sum_exec_runtime;
1335cbee9f88SPeter Zijlstra struct callback_head numa_work;
1336f809ca9aSMel Gorman
1337cb361d8cSJann Horn /*
1338cb361d8cSJann Horn * This pointer is only modified for current in syscall and
1339cb361d8cSJann Horn * pagefault context (and for tasks being destroyed), so it can be read
1340cb361d8cSJann Horn * from any of the following contexts:
1341cb361d8cSJann Horn * - RCU read-side critical section
1342cb361d8cSJann Horn * - current->numa_group from everywhere
1343cb361d8cSJann Horn * - task's runqueue locked, task not running
1344cb361d8cSJann Horn */
1345cb361d8cSJann Horn struct numa_group __rcu *numa_group;
13468c8a743cSPeter Zijlstra
1347745d6147SMel Gorman /*
134844dba3d5SIulia Manda * numa_faults is an array split into four regions:
134944dba3d5SIulia Manda * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
135044dba3d5SIulia Manda * in this precise order.
135144dba3d5SIulia Manda *
135244dba3d5SIulia Manda * faults_memory: Exponentially decaying average of faults on a per-node
135344dba3d5SIulia Manda * basis. Scheduling placement decisions are made based on these
135444dba3d5SIulia Manda * counts. The values remain static for the duration of a PTE scan.
135544dba3d5SIulia Manda * faults_cpu: Track the nodes the process was running on when a NUMA
135644dba3d5SIulia Manda * hinting fault was incurred.
135744dba3d5SIulia Manda * faults_memory_buffer and faults_cpu_buffer: Record faults per node
135844dba3d5SIulia Manda * during the current scan window. When the scan completes, the counts
135944dba3d5SIulia Manda * in faults_memory and faults_cpu decay and these values are copied.
1360745d6147SMel Gorman */
136144dba3d5SIulia Manda unsigned long *numa_faults;
136283e1d2cdSMel Gorman unsigned long total_numa_faults;
1363745d6147SMel Gorman
1364745d6147SMel Gorman /*
136504bb2f94SRik van Riel * numa_faults_locality tracks if faults recorded during the last
1366074c2381SMel Gorman * scan window were remote/local or failed to migrate. The task scan
1367074c2381SMel Gorman * period is adapted based on the locality of the faults with different
1368074c2381SMel Gorman * weights depending on whether they were shared or private faults.
136904bb2f94SRik van Riel */
1370074c2381SMel Gorman unsigned long numa_faults_locality[3];
137104bb2f94SRik van Riel
1372b32e86b4SIngo Molnar unsigned long numa_pages_migrated;
1373cbee9f88SPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */
1374cbee9f88SPeter Zijlstra
1375d7822b1eSMathieu Desnoyers #ifdef CONFIG_RSEQ
1376d7822b1eSMathieu Desnoyers struct rseq __user *rseq;
1377ee3e3ac0SMathieu Desnoyers u32 rseq_len;
1378d7822b1eSMathieu Desnoyers u32 rseq_sig;
1379d7822b1eSMathieu Desnoyers /*
1380d7822b1eSMathieu Desnoyers * RmW on rseq_event_mask must be performed atomically
1381d7822b1eSMathieu Desnoyers * with respect to preemption.
1382d7822b1eSMathieu Desnoyers */
1383d7822b1eSMathieu Desnoyers unsigned long rseq_event_mask;
13847d5265ffSMathieu Desnoyers # ifdef CONFIG_DEBUG_RSEQ
13857d5265ffSMathieu Desnoyers /*
13867d5265ffSMathieu Desnoyers * This is a placeholder to save a copy of the rseq fields for
13877d5265ffSMathieu Desnoyers * validation of read-only fields. The struct rseq has a
13887d5265ffSMathieu Desnoyers * variable-length array at the end, so it cannot be used
13897d5265ffSMathieu Desnoyers * directly. Reserve a size large enough for the known fields.
13907d5265ffSMathieu Desnoyers */
13917d5265ffSMathieu Desnoyers char rseq_fields[sizeof(struct rseq)];
13927d5265ffSMathieu Desnoyers # endif
1393d7822b1eSMathieu Desnoyers #endif
1394d7822b1eSMathieu Desnoyers
1395af7f588dSMathieu Desnoyers #ifdef CONFIG_SCHED_MM_CID
1396af7f588dSMathieu Desnoyers int mm_cid; /* Current cid in mm */
1397223baf9dSMathieu Desnoyers int last_mm_cid; /* Most recent cid in mm */
1398223baf9dSMathieu Desnoyers int migrate_from_cpu;
1399af7f588dSMathieu Desnoyers int mm_cid_active; /* Whether cid bitmap is active */
1400223baf9dSMathieu Desnoyers struct callback_head cid_work;
1401af7f588dSMathieu Desnoyers #endif
1402af7f588dSMathieu Desnoyers
140372b252aeSMel Gorman struct tlbflush_unmap_batch tlb_ubc;
140472b252aeSMel Gorman
14055eca1c10SIngo Molnar /* Cache last used pipe for splice(): */
1406b92ce558SJens Axboe struct pipe_inode_info *splice_pipe;
14075640f768SEric Dumazet
14085640f768SEric Dumazet struct page_frag task_frag;
14095640f768SEric Dumazet
1410ca74e92bSShailabh Nagar #ifdef CONFIG_TASK_DELAY_ACCT
1411ca74e92bSShailabh Nagar struct task_delay_info *delays;
1412ca74e92bSShailabh Nagar #endif
141347913d4eSIngo Molnar
1414f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION
1415f4f154fdSAkinobu Mita int make_it_fail;
14169049f2f6SAkinobu Mita unsigned int fail_nth;
1417f4f154fdSAkinobu Mita #endif
14189d823e8fSWu Fengguang /*
14195eca1c10SIngo Molnar * When (nr_dirtied >= nr_dirtied_pause), it's time to call
14205eca1c10SIngo Molnar * balance_dirty_pages() for a dirty throttling pause:
14219d823e8fSWu Fengguang */
14229d823e8fSWu Fengguang int nr_dirtied;
14239d823e8fSWu Fengguang int nr_dirtied_pause;
14245eca1c10SIngo Molnar /* Start of a write-and-pause period: */
14255eca1c10SIngo Molnar unsigned long dirty_paused_when;
14269d823e8fSWu Fengguang
14279745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP
14289745512cSArjan van de Ven int latency_record_count;
14299745512cSArjan van de Ven struct latency_record latency_record[LT_SAVECOUNT];
14309745512cSArjan van de Ven #endif
14316976675dSArjan van de Ven /*
14325eca1c10SIngo Molnar * Time slack values; these are used to round up poll() and
14336976675dSArjan van de Ven * select() etc timeout values. These are in nanoseconds.
14346976675dSArjan van de Ven */
1435da8b44d5SJohn Stultz u64 timer_slack_ns;
1436da8b44d5SJohn Stultz u64 default_timer_slack_ns;
1437f8d570a4SDavid Miller
1438d73b4936SAndrey Konovalov #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
14390b24beccSAndrey Ryabinin unsigned int kasan_depth;
14400b24beccSAndrey Ryabinin #endif
144192c209acSMarco Elver
1442dfd402a4SMarco Elver #ifdef CONFIG_KCSAN
1443dfd402a4SMarco Elver struct kcsan_ctx kcsan_ctx;
144492c209acSMarco Elver #ifdef CONFIG_TRACE_IRQFLAGS
144592c209acSMarco Elver struct irqtrace_events kcsan_save_irqtrace;
144692c209acSMarco Elver #endif
144769562e49SMarco Elver #ifdef CONFIG_KCSAN_WEAK_MEMORY
144869562e49SMarco Elver int kcsan_stack_depth;
144969562e49SMarco Elver #endif
1450dfd402a4SMarco Elver #endif
14515eca1c10SIngo Molnar
1452f80be457SAlexander Potapenko #ifdef CONFIG_KMSAN
1453f80be457SAlexander Potapenko struct kmsan_ctx kmsan_ctx;
1454f80be457SAlexander Potapenko #endif
1455f80be457SAlexander Potapenko
1456393824f6SPatricia Alfonso #if IS_ENABLED(CONFIG_KUNIT)
1457393824f6SPatricia Alfonso struct kunit *kunit_test;
1458393824f6SPatricia Alfonso #endif
1459393824f6SPatricia Alfonso
1460fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
14615eca1c10SIngo Molnar /* Index of current stored address in ret_stack: */
1462f201ae23SFrederic Weisbecker int curr_ret_stack;
146339eb456dSSteven Rostedt (VMware) int curr_ret_depth;
14645eca1c10SIngo Molnar
14655eca1c10SIngo Molnar /* Stack of return addresses for return function tracing: */
146642675b72SSteven Rostedt (VMware) unsigned long *ret_stack;
14675eca1c10SIngo Molnar
14685eca1c10SIngo Molnar /* Timestamp for last schedule: */
14698aef2d28SSteven Rostedt unsigned long long ftrace_timestamp;
14703c9880f3SSteven Rostedt unsigned long long ftrace_sleeptime;
14715eca1c10SIngo Molnar
1472f201ae23SFrederic Weisbecker /*
1473f201ae23SFrederic Weisbecker * Number of functions that haven't been traced
14745eca1c10SIngo Molnar * because of depth overrun:
1475f201ae23SFrederic Weisbecker */
1476f201ae23SFrederic Weisbecker atomic_t trace_overrun;
14775eca1c10SIngo Molnar
14785eca1c10SIngo Molnar /* Pause tracing: */
1479380c4b14SFrederic Weisbecker atomic_t tracing_graph_pause;
1480f201ae23SFrederic Weisbecker #endif
14815eca1c10SIngo Molnar
1482ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING
14835eca1c10SIngo Molnar /* Bitmask and counter of trace recursion: */
1484261842b7SSteven Rostedt unsigned long trace_recursion;
1485261842b7SSteven Rostedt #endif /* CONFIG_TRACING */
14865eca1c10SIngo Molnar
14875c9a8750SDmitry Vyukov #ifdef CONFIG_KCOV
1488eec028c9SAndrey Konovalov /* See kernel/kcov.c for more details. */
1489eec028c9SAndrey Konovalov
14905eca1c10SIngo Molnar /* Coverage collection mode enabled for this task (0 if disabled): */
14910ed557aaSMark Rutland unsigned int kcov_mode;
14925eca1c10SIngo Molnar
14935eca1c10SIngo Molnar /* Size of the kcov_area: */
14945eca1c10SIngo Molnar unsigned int kcov_size;
14955eca1c10SIngo Molnar
14965eca1c10SIngo Molnar /* Buffer for coverage collection: */
14975c9a8750SDmitry Vyukov void *kcov_area;
14985eca1c10SIngo Molnar
14995eca1c10SIngo Molnar /* KCOV descriptor wired with this task or NULL: */
15005c9a8750SDmitry Vyukov struct kcov *kcov;
1501eec028c9SAndrey Konovalov
1502eec028c9SAndrey Konovalov /* KCOV common handle for remote coverage collection: */
1503eec028c9SAndrey Konovalov u64 kcov_handle;
1504eec028c9SAndrey Konovalov
1505eec028c9SAndrey Konovalov /* KCOV sequence number: */
1506eec028c9SAndrey Konovalov int kcov_sequence;
15075ff3b30aSAndrey Konovalov
15085ff3b30aSAndrey Konovalov /* Collect coverage from softirq context: */
15095ff3b30aSAndrey Konovalov unsigned int kcov_softirq;
15105c9a8750SDmitry Vyukov #endif
15115eca1c10SIngo Molnar
15121c3a0b3dSRoman Gushchin #ifdef CONFIG_MEMCG_V1
1513626ebc41STejun Heo struct mem_cgroup *memcg_in_oom;
15141c3a0b3dSRoman Gushchin #endif
1515b23afb93STejun Heo
15161c3a0b3dSRoman Gushchin #ifdef CONFIG_MEMCG
15175eca1c10SIngo Molnar /* Number of pages to reclaim on returning to userland: */
1518b23afb93STejun Heo unsigned int memcg_nr_pages_over_high;
1519d46eb14bSShakeel Butt
1520d46eb14bSShakeel Butt /* Used by memcontrol for targeted memcg charge: */
1521d46eb14bSShakeel Butt struct mem_cgroup *active_memcg;
15225eca1c10SIngo Molnar
15233a3b7fecSJohannes Weiner /* Cache for current->cgroups->memcg->objcg lookups: */
15241aacbd35SRoman Gushchin struct obj_cgroup *objcg;
15251aacbd35SRoman Gushchin #endif
15261aacbd35SRoman Gushchin
1527d09d8df3SJosef Bacik #ifdef CONFIG_BLK_CGROUP
1528f05837edSChristoph Hellwig struct gendisk *throttle_disk;
1529d09d8df3SJosef Bacik #endif
1530d09d8df3SJosef Bacik
15310326f5a9SSrikar Dronamraju #ifdef CONFIG_UPROBES
15320326f5a9SSrikar Dronamraju struct uprobe_task *utask;
15330326f5a9SSrikar Dronamraju #endif
1534cafe5635SKent Overstreet #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1535cafe5635SKent Overstreet unsigned int sequential_io;
1536cafe5635SKent Overstreet unsigned int sequential_io_avg;
1537cafe5635SKent Overstreet #endif
15385fbda3ecSThomas Gleixner struct kmap_ctrl kmap_ctrl;
15398eb23b9fSPeter Zijlstra #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
15408eb23b9fSPeter Zijlstra unsigned long task_state_change;
15415f220be2SThomas Gleixner # ifdef CONFIG_PREEMPT_RT
15425f220be2SThomas Gleixner unsigned long saved_state_change;
15435f220be2SThomas Gleixner # endif
15448eb23b9fSPeter Zijlstra #endif
154522df776aSDavid Vernet struct rcu_head rcu;
154622df776aSDavid Vernet refcount_t rcu_users;
15478bcbde54SDavid Hildenbrand int pagefault_disabled;
154803049269SMichal Hocko #ifdef CONFIG_MMU
154929c696e1SVladimir Davydov struct task_struct *oom_reaper_list;
1550e4a38402SNico Pache struct timer_list oom_reaper_timer;
155103049269SMichal Hocko #endif
1552ba14a194SAndy Lutomirski #ifdef CONFIG_VMAP_STACK
1553ba14a194SAndy Lutomirski struct vm_struct *stack_vm_area;
1554ba14a194SAndy Lutomirski #endif
155568f24b08SAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
15565eca1c10SIngo Molnar /* A live task holds one reference: */
1557f0b89d39SElena Reshetova refcount_t stack_refcount;
155868f24b08SAndy Lutomirski #endif
1559d83a7cb3SJosh Poimboeuf #ifdef CONFIG_LIVEPATCH
1560d83a7cb3SJosh Poimboeuf int patch_state;
1561d83a7cb3SJosh Poimboeuf #endif
1562e4e55b47STetsuo Handa #ifdef CONFIG_SECURITY
1563e4e55b47STetsuo Handa /* Used by LSM modules for access restriction: */
1564e4e55b47STetsuo Handa void *security;
1565e4e55b47STetsuo Handa #endif
1566a10787e6SSong Liu #ifdef CONFIG_BPF_SYSCALL
1567a10787e6SSong Liu /* Used by BPF task local storage */
1568a10787e6SSong Liu struct bpf_local_storage __rcu *bpf_storage;
1569c7603cfaSAndrii Nakryiko /* Used for BPF run context */
1570c7603cfaSAndrii Nakryiko struct bpf_run_ctx *bpf_ctx;
1571a10787e6SSong Liu #endif
1572401cb7daSSebastian Andrzej Siewior /* Used by BPF for per-TASK xdp storage */
1573401cb7daSSebastian Andrzej Siewior struct bpf_net_context *bpf_net_context;
157429e48ce8SKees Cook
1575afaef01cSAlexander Popov #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1576afaef01cSAlexander Popov unsigned long lowest_stack;
1577c8d12627SAlexander Popov unsigned long prev_lowest_stack;
1578afaef01cSAlexander Popov #endif
1579afaef01cSAlexander Popov
15805567d11cSPeter Zijlstra #ifdef CONFIG_X86_MCE
1581c0ab7ffcSTony Luck void __user *mce_vaddr;
1582c0ab7ffcSTony Luck __u64 mce_kflags;
15835567d11cSPeter Zijlstra u64 mce_addr;
158417fae129STony Luck __u64 mce_ripv : 1,
158517fae129STony Luck mce_whole_page : 1,
158617fae129STony Luck __mce_reserved : 62;
15875567d11cSPeter Zijlstra struct callback_head mce_kill_me;
158881065b35STony Luck int mce_count;
15895567d11cSPeter Zijlstra #endif
15905567d11cSPeter Zijlstra
1591d741bf41SPeter Zijlstra #ifdef CONFIG_KRETPROBES
1592d741bf41SPeter Zijlstra struct llist_head kretprobe_instances;
1593d741bf41SPeter Zijlstra #endif
159454ecbe6fSMasami Hiramatsu #ifdef CONFIG_RETHOOK
159554ecbe6fSMasami Hiramatsu struct llist_head rethooks;
159654ecbe6fSMasami Hiramatsu #endif
1597d741bf41SPeter Zijlstra
159858e106e7SBalbir Singh #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
159958e106e7SBalbir Singh /*
160058e106e7SBalbir Singh * If L1D flush is supported on mm context switch,
160158e106e7SBalbir Singh * then we use this callback head to queue kill work
160258e106e7SBalbir Singh * to kill tasks that are not running on SMT-disabled
160358e106e7SBalbir Singh * cores.
160458e106e7SBalbir Singh */
160558e106e7SBalbir Singh struct callback_head l1d_flush_kill;
160658e106e7SBalbir Singh #endif
160758e106e7SBalbir Singh
1608102227b9SDaniel Bristot de Oliveira #ifdef CONFIG_RV
1609102227b9SDaniel Bristot de Oliveira /*
1610102227b9SDaniel Bristot de Oliveira * Per-task RV monitors. Their number is currently fixed at RV_PER_TASK_MONITORS.
1611102227b9SDaniel Bristot de Oliveira * If we find justification for more monitors, we can think
1612102227b9SDaniel Bristot de Oliveira * about adding more or developing a dynamic method. So far,
1613102227b9SDaniel Bristot de Oliveira * none of these are justified.
1614102227b9SDaniel Bristot de Oliveira */
1615102227b9SDaniel Bristot de Oliveira union rv_task_monitor rv[RV_PER_TASK_MONITORS];
1616102227b9SDaniel Bristot de Oliveira #endif
1617102227b9SDaniel Bristot de Oliveira
1618fd593511SBeau Belgrave #ifdef CONFIG_USER_EVENTS
1619fd593511SBeau Belgrave struct user_event_mm *user_event_mm;
1620fd593511SBeau Belgrave #endif
1621fd593511SBeau Belgrave
162229e48ce8SKees Cook /*
162329e48ce8SKees Cook * New fields for task_struct should be added above here, so that
162429e48ce8SKees Cook * they are included in the randomized portion of task_struct.
162529e48ce8SKees Cook */
162629e48ce8SKees Cook randomized_struct_fields_end
162729e48ce8SKees Cook
16285eca1c10SIngo Molnar /* CPU-specific state of this task: */
16290c8c0f03SDave Hansen struct thread_struct thread;
16305eca1c10SIngo Molnar
16310c8c0f03SDave Hansen /*
16320c8c0f03SDave Hansen * WARNING: on x86, 'thread_struct' contains a variable-sized
16330c8c0f03SDave Hansen * structure. It *MUST* be at the end of 'task_struct'.
16340c8c0f03SDave Hansen *
16350c8c0f03SDave Hansen * Do not put anything below here!
16360c8c0f03SDave Hansen */
16371da177e4SLinus Torvalds };
16381da177e4SLinus Torvalds
163906eb6184SPeter Zijlstra #define TASK_REPORT_IDLE (TASK_REPORT + 1)
164006eb6184SPeter Zijlstra #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
164106eb6184SPeter Zijlstra
1642fa2c3254SValentin Schneider static inline unsigned int __task_state_index(unsigned int tsk_state,
1643fa2c3254SValentin Schneider unsigned int tsk_exit_state)
164420435d84SXie XiuQi {
1645fa2c3254SValentin Schneider unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;
164620435d84SXie XiuQi
164706eb6184SPeter Zijlstra BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
164806eb6184SPeter Zijlstra
16490d6b3528SNeilBrown if ((tsk_state & TASK_IDLE) == TASK_IDLE)
165006eb6184SPeter Zijlstra state = TASK_REPORT_IDLE;
165106eb6184SPeter Zijlstra
165225795ef6SValentin Schneider /*
165325795ef6SValentin Schneider * We're lying here, but rather than expose a completely new task state
165425795ef6SValentin Schneider * to userspace, we can make this appear as if the task has gone through
165525795ef6SValentin Schneider * a regular rt_mutex_lock() call.
1656f718faf3SChen Ridong * Report frozen tasks as uninterruptible.
165725795ef6SValentin Schneider */
1658f718faf3SChen Ridong if ((tsk_state & TASK_RTLOCK_WAIT) || (tsk_state & TASK_FROZEN))
165925795ef6SValentin Schneider state = TASK_UNINTERRUPTIBLE;
166025795ef6SValentin Schneider
16611593baabSPeter Zijlstra return fls(state);
16621593baabSPeter Zijlstra }
166320435d84SXie XiuQi
1664fa2c3254SValentin Schneider static inline unsigned int task_state_index(struct task_struct *tsk)
1665fa2c3254SValentin Schneider {
1666fa2c3254SValentin Schneider return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
1667fa2c3254SValentin Schneider }
1668fa2c3254SValentin Schneider
16691d48b080SPeter Zijlstra static inline char task_index_to_char(unsigned int state)
16701593baabSPeter Zijlstra {
16718ef9925bSPeter Zijlstra static const char state_char[] = "RSDTtXZPI";
16721593baabSPeter Zijlstra
16737c45d828SYury Norov BUILD_BUG_ON(TASK_REPORT_MAX * 2 != 1 << (sizeof(state_char) - 1));
16741593baabSPeter Zijlstra
16751593baabSPeter Zijlstra return state_char[state];
16761593baabSPeter Zijlstra }
16771593baabSPeter Zijlstra
16781593baabSPeter Zijlstra static inline char task_state_to_char(struct task_struct *tsk)
16791593baabSPeter Zijlstra {
16801d48b080SPeter Zijlstra return task_index_to_char(task_state_index(tsk));
168120435d84SXie XiuQi }
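/*
 * Usage sketch (editorial illustration, not an upstream helper): the
 * characters produced by task_state_to_char() follow the /proc and ps(1)
 * convention: 'R' running, 'S' interruptible sleep, 'D' uninterruptible
 * sleep, 'T' stopped, 't' tracing stop, 'X' dead, 'Z' zombie, 'P' parked,
 * 'I' idle.  A debug printout could therefore look like:
 *
 *	pr_info("%s[%d]: state %c\n",
 *		tsk->comm, tsk->pid, task_state_to_char(tsk));
 */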
168220435d84SXie XiuQi
16839ec52099SCedric Le Goater extern struct pid *cad_pid;
16849ec52099SCedric Le Goater
16851da177e4SLinus Torvalds /*
16861da177e4SLinus Torvalds * Per process flags
16871da177e4SLinus Torvalds */
168801ccf592SSebastian Andrzej Siewior #define PF_VCPU 0x00000001 /* I'm a virtual CPU */
1689c1de45caSPeter Zijlstra #define PF_IDLE 0x00000002 /* I am an IDLE thread */
16905eca1c10SIngo Molnar #define PF_EXITING 0x00000004 /* Getting shut down */
169192307383SEric W. Biederman #define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */
169201ccf592SSebastian Andrzej Siewior #define PF_IO_WORKER 0x00000010 /* Task is an IO worker */
169321aa9af0STejun Heo #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
16945eca1c10SIngo Molnar #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
16955eca1c10SIngo Molnar #define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */
16965eca1c10SIngo Molnar #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
16975eca1c10SIngo Molnar #define PF_DUMPCORE 0x00000200 /* Dumped core */
16985eca1c10SIngo Molnar #define PF_SIGNALED 0x00000400 /* Killed by a signal */
1699cfb837e8SVlastimil Babka #define PF_MEMALLOC 0x00000800 /* Allocating memory to free memory. See memalloc_noreclaim_save() */
17005eca1c10SIngo Molnar #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
17015eca1c10SIngo Molnar #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
170254e6842dSMike Christie #define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */
17035eca1c10SIngo Molnar #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
1704*ce6d9c1cSMike Snitzer #define PF_KCOMPACTD 0x00010000 /* I am kcompactd */
17057dea19f9SMichal Hocko #define PF_KSWAPD 0x00020000 /* I am kswapd */
1706cfb837e8SVlastimil Babka #define PF_MEMALLOC_NOFS 0x00040000 /* All allocations inherit GFP_NOFS. See memalloc_nofs_save() */
1707cfb837e8SVlastimil Babka #define PF_MEMALLOC_NOIO 0x00080000 /* All allocations inherit GFP_NOIO. See memalloc_noio_save() */
1708a37b0715SNeilBrown #define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to,
1709a37b0715SNeilBrown * I am cleaning dirty pages from some other bdi. */
1710246bb0b1SOleg Nesterov #define PF_KTHREAD 0x00200000 /* I am a kernel thread */
17115eca1c10SIngo Molnar #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
17129a8da05dSMichal Hocko #define PF__HOLE__00800000 0x00800000
17139a8da05dSMichal Hocko #define PF__HOLE__01000000 0x01000000
1714fb04563dSPeter Zijlstra #define PF__HOLE__02000000 0x02000000
17153bd37062SSebastian Andrzej Siewior #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
17164db96cf0SAndi Kleen #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1717cfb837e8SVlastimil Babka #define PF_MEMALLOC_PIN 0x10000000 /* Allocations constrained to zones which allow long term pinning.
1718cfb837e8SVlastimil Babka * See memalloc_pin_save() */
171906b23f92SJens Axboe #define PF_BLOCK_TS 0x20000000 /* plug has ts that needs updating */
1720fb04563dSPeter Zijlstra #define PF__HOLE__40000000 0x40000000
17215eca1c10SIngo Molnar #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
17221da177e4SLinus Torvalds
17231da177e4SLinus Torvalds /*
17241da177e4SLinus Torvalds * Only the _current_ task can read/write to tsk->flags, but other
17251da177e4SLinus Torvalds * tasks can access tsk->flags in readonly mode for example
17261da177e4SLinus Torvalds * with tsk_used_math (like during threaded core dumping).
17271da177e4SLinus Torvalds * There is however an exception to this rule during ptrace
17281da177e4SLinus Torvalds * or during fork: the ptracer task is allowed to write to the
17291da177e4SLinus Torvalds * child->flags of its traced child (same goes for fork, the parent
17301da177e4SLinus Torvalds * can write to the child->flags), because we're guaranteed the
17311da177e4SLinus Torvalds * child is not running and in turn not changing child->flags
17321da177e4SLinus Torvalds * at the same time the parent does it.
17331da177e4SLinus Torvalds */
17341da177e4SLinus Torvalds #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
17351da177e4SLinus Torvalds #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
17361da177e4SLinus Torvalds #define clear_used_math() clear_stopped_child_used_math(current)
17371da177e4SLinus Torvalds #define set_used_math() set_stopped_child_used_math(current)
17385eca1c10SIngo Molnar
17391da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \
17401da177e4SLinus Torvalds do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
17415eca1c10SIngo Molnar
17425eca1c10SIngo Molnar #define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)
17435eca1c10SIngo Molnar
17441da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \
17451da177e4SLinus Torvalds do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
17465eca1c10SIngo Molnar
17471da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
17481da177e4SLinus Torvalds #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
17491da177e4SLinus Torvalds #define used_math() tsk_used_math(current)
17501da177e4SLinus Torvalds
175183d40a61SPeter Zijlstra static __always_inline bool is_percpu_thread(void)
175262ec05ddSThomas Gleixner {
175362ec05ddSThomas Gleixner #ifdef CONFIG_SMP
175462ec05ddSThomas Gleixner return (current->flags & PF_NO_SETAFFINITY) &&
175562ec05ddSThomas Gleixner (current->nr_cpus_allowed == 1);
175662ec05ddSThomas Gleixner #else
175762ec05ddSThomas Gleixner return true;
175862ec05ddSThomas Gleixner #endif
175962ec05ddSThomas Gleixner }
176062ec05ddSThomas Gleixner
17611d4457f9SKees Cook /* Per-process atomic flags. */
1762a2b86f77SZefan Li #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
17632ad654bcSZefan Li #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
17642ad654bcSZefan Li #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
1765356e4bffSThomas Gleixner #define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
1766356e4bffSThomas Gleixner #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
17679137bb27SThomas Gleixner #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
17689137bb27SThomas Gleixner #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
176971368af9SWaiman Long #define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */
17701d4457f9SKees Cook
1771e0e5070bSZefan Li #define TASK_PFA_TEST(name, func) \
1772e0e5070bSZefan Li static inline bool task_##func(struct task_struct *p) \
1773e0e5070bSZefan Li { return test_bit(PFA_##name, &p->atomic_flags); }
17745eca1c10SIngo Molnar
1775e0e5070bSZefan Li #define TASK_PFA_SET(name, func) \
1776e0e5070bSZefan Li static inline void task_set_##func(struct task_struct *p) \
1777e0e5070bSZefan Li { set_bit(PFA_##name, &p->atomic_flags); }
17785eca1c10SIngo Molnar
1779e0e5070bSZefan Li #define TASK_PFA_CLEAR(name, func) \
1780e0e5070bSZefan Li static inline void task_clear_##func(struct task_struct *p) \
1781e0e5070bSZefan Li { clear_bit(PFA_##name, &p->atomic_flags); }
17821d4457f9SKees Cook
1783e0e5070bSZefan Li TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1784e0e5070bSZefan Li TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
17851d4457f9SKees Cook
17862ad654bcSZefan Li TASK_PFA_TEST(SPREAD_PAGE, spread_page)
17872ad654bcSZefan Li TASK_PFA_SET(SPREAD_PAGE, spread_page)
17882ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
17892ad654bcSZefan Li
17902ad654bcSZefan Li TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
17912ad654bcSZefan Li TASK_PFA_SET(SPREAD_SLAB, spread_slab)
17922ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1793544b2c91STejun Heo
1794356e4bffSThomas Gleixner TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1795356e4bffSThomas Gleixner TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1796356e4bffSThomas Gleixner TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1797356e4bffSThomas Gleixner
179871368af9SWaiman Long TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
179971368af9SWaiman Long TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
180071368af9SWaiman Long TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
180171368af9SWaiman Long
1802356e4bffSThomas Gleixner TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1803356e4bffSThomas Gleixner TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1804356e4bffSThomas Gleixner
18059137bb27SThomas Gleixner TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
18069137bb27SThomas Gleixner TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
18079137bb27SThomas Gleixner TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
18089137bb27SThomas Gleixner
18099137bb27SThomas Gleixner TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
18109137bb27SThomas Gleixner TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
18119137bb27SThomas Gleixner
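/*
 * For reference, a sketch of what the TASK_PFA_* generators above expand
 * to, using PFA_NO_NEW_PRIVS as the example (the real helpers come from
 * the macro invocations, not from hand-written definitions like these):
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 *	static inline void task_set_no_new_privs(struct task_struct *p)
 *	{ set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 */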
18125eca1c10SIngo Molnar static inline void
1813717a94b5SNeilBrown current_restore_flags(unsigned long orig_flags, unsigned long flags)
1814907aed48SMel Gorman {
1815717a94b5SNeilBrown current->flags &= ~flags;
1816717a94b5SNeilBrown current->flags |= orig_flags & flags;
1817907aed48SMel Gorman }
1818907aed48SMel Gorman
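/*
 * current_restore_flags() pairs with an open-coded save of the same bits.
 * A sketch of the usual save/restore idiom (the memalloc_noio_save()/
 * memalloc_noio_restore() helpers in <linux/sched/mm.h> wrap this exact
 * pattern; it is reproduced here only as an illustration):
 *
 *	unsigned int noio_flags = current->flags & PF_MEMALLOC_NOIO;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	... allocations in this section implicitly behave as GFP_NOIO ...
 *	current_restore_flags(noio_flags, PF_MEMALLOC_NOIO);
 */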
18195eca1c10SIngo Molnar extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
18202ef269efSDietmar Eggemann extern int task_can_attach(struct task_struct *p);
182185989106SDietmar Eggemann extern int dl_bw_alloc(int cpu, u64 dl_bw);
182285989106SDietmar Eggemann extern void dl_bw_free(int cpu, u64 dl_bw);
18231da177e4SLinus Torvalds #ifdef CONFIG_SMP
1824ae894083SCosta Shulyupin
1825ae894083SCosta Shulyupin /* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
18265eca1c10SIngo Molnar extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1827ae894083SCosta Shulyupin
1828ae894083SCosta Shulyupin /**
1829ae894083SCosta Shulyupin * set_cpus_allowed_ptr - set CPU affinity mask of a task
1830ae894083SCosta Shulyupin * @p: the task
1831ae894083SCosta Shulyupin * @new_mask: CPU affinity mask
1832ae894083SCosta Shulyupin *
1833ae894083SCosta Shulyupin * Return: zero if successful, or a negative error code
1834ae894083SCosta Shulyupin */
18355eca1c10SIngo Molnar extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1836b90ca8baSWill Deacon extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
1837b90ca8baSWill Deacon extern void release_user_cpus_ptr(struct task_struct *p);
1838234b8ab6SWill Deacon extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
183907ec77a1SWill Deacon extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
184007ec77a1SWill Deacon extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
18411da177e4SLinus Torvalds #else
18425eca1c10SIngo Molnar static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
18431e1b6c51SKOSAKI Motohiro {
18441e1b6c51SKOSAKI Motohiro }
18455eca1c10SIngo Molnar static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
18461da177e4SLinus Torvalds {
1847361c1f04SYury Norov /* Opencoded cpumask_test_cpu(0, new_mask) to avoid dependency on cpumask.h */
1848361c1f04SYury Norov if ((*cpumask_bits(new_mask) & 1) == 0)
18491da177e4SLinus Torvalds return -EINVAL;
18501da177e4SLinus Torvalds return 0;
18511da177e4SLinus Torvalds }
1852b90ca8baSWill Deacon static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
1853b90ca8baSWill Deacon {
1854b90ca8baSWill Deacon if (src->user_cpus_ptr)
1855b90ca8baSWill Deacon return -EINVAL;
1856b90ca8baSWill Deacon return 0;
1857b90ca8baSWill Deacon }
1858b90ca8baSWill Deacon static inline void release_user_cpus_ptr(struct task_struct *p)
1859b90ca8baSWill Deacon {
1860b90ca8baSWill Deacon WARN_ON(p->user_cpus_ptr);
1861b90ca8baSWill Deacon }
1862234b8ab6SWill Deacon
1863234b8ab6SWill Deacon static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
1864234b8ab6SWill Deacon {
1865234b8ab6SWill Deacon return 0;
1866234b8ab6SWill Deacon }
18671da177e4SLinus Torvalds #endif
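/*
 * Affinity usage sketch (illustrative only; worker_fn is a placeholder and
 * kthread_create()/cpumask_of() must be available at the call site):
 *
 *	struct task_struct *t = kthread_create(worker_fn, NULL, "worker/%d", cpu);
 *
 *	if (!IS_ERR(t)) {
 *		set_cpus_allowed_ptr(t, cpumask_of(cpu));
 *		wake_up_process(t);
 *	}
 *
 * For this specific case, kthread_bind() is the dedicated helper.
 */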
1868e0ad9556SRusty Russell
1869fa93384fSDan Carpenter extern int yield_to(struct task_struct *p, bool preempt);
187036c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice);
187136c8b586SIngo Molnar extern int task_prio(const struct task_struct *p);
18725eca1c10SIngo Molnar
1873d0ea0268SDongsheng Yang /**
1874d0ea0268SDongsheng Yang * task_nice - return the nice value of a given task.
1875d0ea0268SDongsheng Yang * @p: the task in question.
1876d0ea0268SDongsheng Yang *
1877d0ea0268SDongsheng Yang * Return: The nice value [ -20 ... 0 ... 19 ].
1878d0ea0268SDongsheng Yang */
1879d0ea0268SDongsheng Yang static inline int task_nice(const struct task_struct *p)
1880d0ea0268SDongsheng Yang {
1881d0ea0268SDongsheng Yang return PRIO_TO_NICE((p)->static_prio);
1882d0ea0268SDongsheng Yang }
18835eca1c10SIngo Molnar
188436c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice);
188536c8b586SIngo Molnar extern int task_curr(const struct task_struct *p);
18861da177e4SLinus Torvalds extern int idle_cpu(int cpu);
1887943d355dSRohit Jain extern int available_idle_cpu(int cpu);
18885eca1c10SIngo Molnar extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
18895eca1c10SIngo Molnar extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
18908b700983SPeter Zijlstra extern void sched_set_fifo(struct task_struct *p);
18918b700983SPeter Zijlstra extern void sched_set_fifo_low(struct task_struct *p);
18928b700983SPeter Zijlstra extern void sched_set_normal(struct task_struct *p, int nice);
18935eca1c10SIngo Molnar extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1894794a56ebSJuri Lelli extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
189536c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu);
18965eca1c10SIngo Molnar
1897c4f30608SPaul E. McKenney /**
1898c4f30608SPaul E. McKenney * is_idle_task - is the specified task an idle task?
1899fa757281SRandy Dunlap * @p: the task in question.
1900e69f6186SYacine Belkadi *
1901e69f6186SYacine Belkadi * Return: 1 if @p is an idle task. 0 otherwise.
1902c4f30608SPaul E. McKenney */
1903c94a88f3SMarco Elver static __always_inline bool is_idle_task(const struct task_struct *p)
1904c4f30608SPaul E. McKenney {
1905c1de45caSPeter Zijlstra return !!(p->flags & PF_IDLE);
1906c4f30608SPaul E. McKenney }
19075eca1c10SIngo Molnar
190836c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu);
1909a458ae2eSPeter Zijlstra extern void ia64_set_curr_task(int cpu, struct task_struct *p);
19101da177e4SLinus Torvalds
19111da177e4SLinus Torvalds void yield(void);
19121da177e4SLinus Torvalds
19131da177e4SLinus Torvalds union thread_union {
19140500871fSDavid Howells struct task_struct task;
1915c65eacbeSAndy Lutomirski #ifndef CONFIG_THREAD_INFO_IN_TASK
19161da177e4SLinus Torvalds struct thread_info thread_info;
1917c65eacbeSAndy Lutomirski #endif
19181da177e4SLinus Torvalds unsigned long stack[THREAD_SIZE/sizeof(long)];
19191da177e4SLinus Torvalds };
19201da177e4SLinus Torvalds
19210500871fSDavid Howells #ifndef CONFIG_THREAD_INFO_IN_TASK
19220500871fSDavid Howells extern struct thread_info init_thread_info;
19230500871fSDavid Howells #endif
19240500871fSDavid Howells
19250500871fSDavid Howells extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
19260500871fSDavid Howells
1927f3ac6067SIngo Molnar #ifdef CONFIG_THREAD_INFO_IN_TASK
1928bcf9033eSArd Biesheuvel # define task_thread_info(task) (&(task)->thread_info)
19295e9f0c48SDavid Disseldorp #else
1930f3ac6067SIngo Molnar # define task_thread_info(task) ((struct thread_info *)(task)->stack)
1931f3ac6067SIngo Molnar #endif
1932f3ac6067SIngo Molnar
1933198fe21bSPavel Emelyanov /*
1934198fe21bSPavel Emelyanov * find a task by one of its numerical ids
1935198fe21bSPavel Emelyanov *
1936198fe21bSPavel Emelyanov * find_task_by_pid_ns():
1937198fe21bSPavel Emelyanov * finds a task by its pid in the specified namespace
1938228ebcbeSPavel Emelyanov * find_task_by_vpid():
1939228ebcbeSPavel Emelyanov * finds a task by its virtual pid
1940198fe21bSPavel Emelyanov *
1941e49859e7SPavel Emelyanov * see also find_vpid() etc in include/linux/pid.h
1942198fe21bSPavel Emelyanov */
1943198fe21bSPavel Emelyanov
1944228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr);
19455eca1c10SIngo Molnar extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1946198fe21bSPavel Emelyanov
19472ee08260SMike Rapoport /*
19482ee08260SMike Rapoport * find a task by its virtual pid and get the task struct
19492ee08260SMike Rapoport */
19502ee08260SMike Rapoport extern struct task_struct *find_get_task_by_vpid(pid_t nr);
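/*
 * Reference-counting sketch (illustrative only): the _get_ variant pins the
 * task, so the caller must drop the reference with put_task_struct(), which
 * is declared in <linux/sched/task.h>:
 *
 *	struct task_struct *p = find_get_task_by_vpid(nr);
 *
 *	if (p) {
 *		... use p without holding rcu_read_lock() ...
 *		put_task_struct(p);
 *	}
 */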
19512ee08260SMike Rapoport
1952b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1953b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk);
19543e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk);
19555eca1c10SIngo Molnar
19561da177e4SLinus Torvalds #ifdef CONFIG_SMP
19571da177e4SLinus Torvalds extern void kick_process(struct task_struct *tsk);
19581da177e4SLinus Torvalds #else
19591da177e4SLinus Torvalds static inline void kick_process(struct task_struct *tsk) { }
19601da177e4SLinus Torvalds #endif
19611da177e4SLinus Torvalds
196282b89778SAdrian Hunter extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
19633a3f61ceSKees Cook #define set_task_comm(tsk, from) ({ \
19643a3f61ceSKees Cook BUILD_BUG_ON(sizeof(from) != TASK_COMM_LEN); \
19653a3f61ceSKees Cook __set_task_comm(tsk, from, false); \
19663a3f61ceSKees Cook })
19675eca1c10SIngo Molnar
19684cc0473dSYafang Shao /*
19694cc0473dSYafang Shao * - Why not use task_lock()?
19704cc0473dSYafang Shao * User space can randomly change their names anyway, so locking for readers
19714cc0473dSYafang Shao * doesn't make sense. For writers, locking is probably necessary, as a race
19724cc0473dSYafang Shao * condition could lead to long-term mixed results.
19734cc0473dSYafang Shao * The strscpy_pad() in __set_task_comm() can ensure that the task comm is
19744cc0473dSYafang Shao * always NUL-terminated and zero-padded. Therefore the race condition between
19754cc0473dSYafang Shao * reader and writer is not an issue.
19764cc0473dSYafang Shao *
19774cc0473dSYafang Shao * - BUILD_BUG_ON() can help prevent the buf from being truncated.
19784cc0473dSYafang Shao * Since the callers don't perform any return value checks, this safeguard is
19794cc0473dSYafang Shao * necessary.
19804cc0473dSYafang Shao */
19813756f640SArnd Bergmann #define get_task_comm(buf, tsk) ({ \
19824cc0473dSYafang Shao BUILD_BUG_ON(sizeof(buf) < TASK_COMM_LEN); \
19834cc0473dSYafang Shao strscpy_pad(buf, (tsk)->comm); \
19844cc0473dSYafang Shao buf; \
19853756f640SArnd Bergmann })
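/*
 * Usage sketch (illustrative only): get_task_comm() copies into a local
 * buffer and returns it, so the name can be used without any task lock:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	pr_debug("request issued by %s\n", get_task_comm(comm, current));
 *
 * Passing a buffer smaller than TASK_COMM_LEN fails the BUILD_BUG_ON()
 * above at compile time.
 */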
19861da177e4SLinus Torvalds
19871da177e4SLinus Torvalds #ifdef CONFIG_SMP
19882a0a24ebSThomas Gleixner static __always_inline void scheduler_ipi(void)
19892a0a24ebSThomas Gleixner {
19902a0a24ebSThomas Gleixner /*
19912a0a24ebSThomas Gleixner * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
19922a0a24ebSThomas Gleixner * TIF_NEED_RESCHED remotely (for the first time) will also send
19932a0a24ebSThomas Gleixner * this IPI.
19942a0a24ebSThomas Gleixner */
19952a0a24ebSThomas Gleixner preempt_fold_need_resched();
19962a0a24ebSThomas Gleixner }
19971da177e4SLinus Torvalds #else
1998184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { }
19991da177e4SLinus Torvalds #endif
20001da177e4SLinus Torvalds
2001d5e15866SPeter Zijlstra extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
2002d5e15866SPeter Zijlstra
20035eca1c10SIngo Molnar /*
20045eca1c10SIngo Molnar * Set thread flags in other task's structures.
20055eca1c10SIngo Molnar * See asm/thread_info.h for TIF_xxxx flags available:
20061da177e4SLinus Torvalds */
20071da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
20081da177e4SLinus Torvalds {
2009a1261f54SAl Viro set_ti_thread_flag(task_thread_info(tsk), flag);
20101da177e4SLinus Torvalds }
20111da177e4SLinus Torvalds
20121da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
20131da177e4SLinus Torvalds {
2014a1261f54SAl Viro clear_ti_thread_flag(task_thread_info(tsk), flag);
20151da177e4SLinus Torvalds }
20161da177e4SLinus Torvalds
201793ee37c2SDave Martin static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
201893ee37c2SDave Martin bool value)
201993ee37c2SDave Martin {
202093ee37c2SDave Martin update_ti_thread_flag(task_thread_info(tsk), flag, value);
202193ee37c2SDave Martin }
202293ee37c2SDave Martin
20231da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
20241da177e4SLinus Torvalds {
2025a1261f54SAl Viro return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
20261da177e4SLinus Torvalds }
20271da177e4SLinus Torvalds
20281da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
20291da177e4SLinus Torvalds {
2030a1261f54SAl Viro return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
20311da177e4SLinus Torvalds }
20321da177e4SLinus Torvalds
20331da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
20341da177e4SLinus Torvalds {
2035a1261f54SAl Viro return test_ti_thread_flag(task_thread_info(tsk), flag);
20361da177e4SLinus Torvalds }
20371da177e4SLinus Torvalds
20381da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk)
20391da177e4SLinus Torvalds {
20401da177e4SLinus Torvalds set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
20411da177e4SLinus Torvalds }
20421da177e4SLinus Torvalds
20431da177e4SLinus Torvalds static inline void clear_tsk_need_resched(struct task_struct *tsk)
20441da177e4SLinus Torvalds {
204526baa1f1SPeter Zijlstra atomic_long_andnot(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY,
204626baa1f1SPeter Zijlstra (atomic_long_t *)&task_thread_info(tsk)->flags);
20471da177e4SLinus Torvalds }
20481da177e4SLinus Torvalds
20498ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk)
20508ae121acSGregory Haskins {
20518ae121acSGregory Haskins return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
20528ae121acSGregory Haskins }
20538ae121acSGregory Haskins
20541da177e4SLinus Torvalds /*
20551da177e4SLinus Torvalds * cond_resched() and cond_resched_lock(): latency reduction via
20561da177e4SLinus Torvalds * explicit rescheduling in places that are safe. The return
20571da177e4SLinus Torvalds * value indicates whether a reschedule was done in fact.
20581da177e4SLinus Torvalds * cond_resched_lock() will drop the spinlock before scheduling,
20591da177e4SLinus Torvalds */
2060b965f1ddSPeter Zijlstra (Intel) #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
2061b965f1ddSPeter Zijlstra (Intel) extern int __cond_resched(void);
2062b965f1ddSPeter Zijlstra (Intel)
206399cf983cSMark Rutland #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
2064b965f1ddSPeter Zijlstra (Intel)
2065e3ff7c60SJosh Poimboeuf void sched_dynamic_klp_enable(void);
2066e3ff7c60SJosh Poimboeuf void sched_dynamic_klp_disable(void);
2067e3ff7c60SJosh Poimboeuf
2068b965f1ddSPeter Zijlstra (Intel) DECLARE_STATIC_CALL(cond_resched, __cond_resched);
2069b965f1ddSPeter Zijlstra (Intel)
2070b965f1ddSPeter Zijlstra (Intel) static __always_inline int _cond_resched(void)
2071b965f1ddSPeter Zijlstra (Intel) {
2072ef72661eSPeter Zijlstra return static_call_mod(cond_resched)();
2073b965f1ddSPeter Zijlstra (Intel) }
2074b965f1ddSPeter Zijlstra (Intel)
207599cf983cSMark Rutland #elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
2076e3ff7c60SJosh Poimboeuf
207799cf983cSMark Rutland extern int dynamic_cond_resched(void);
207899cf983cSMark Rutland
207999cf983cSMark Rutland static __always_inline int _cond_resched(void)
208099cf983cSMark Rutland {
208199cf983cSMark Rutland return dynamic_cond_resched();
208299cf983cSMark Rutland }
208399cf983cSMark Rutland
2084e3ff7c60SJosh Poimboeuf #else /* !CONFIG_PREEMPTION */
2085b965f1ddSPeter Zijlstra (Intel)
2086b965f1ddSPeter Zijlstra (Intel) static inline int _cond_resched(void)
2087b965f1ddSPeter Zijlstra (Intel) {
2088e3ff7c60SJosh Poimboeuf klp_sched_try_switch();
2089b965f1ddSPeter Zijlstra (Intel) return __cond_resched();
2090b965f1ddSPeter Zijlstra (Intel) }
2091b965f1ddSPeter Zijlstra (Intel)
2092e3ff7c60SJosh Poimboeuf #endif /* PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
2093b965f1ddSPeter Zijlstra (Intel)
2094e3ff7c60SJosh Poimboeuf #else /* CONFIG_PREEMPTION && !CONFIG_PREEMPT_DYNAMIC */
2095b965f1ddSPeter Zijlstra (Intel)
2096e3ff7c60SJosh Poimboeuf static inline int _cond_resched(void)
2097e3ff7c60SJosh Poimboeuf {
2098e3ff7c60SJosh Poimboeuf klp_sched_try_switch();
2099e3ff7c60SJosh Poimboeuf return 0;
2100e3ff7c60SJosh Poimboeuf }
2101b965f1ddSPeter Zijlstra (Intel)
2102e3ff7c60SJosh Poimboeuf #endif /* !CONFIG_PREEMPTION || CONFIG_PREEMPT_DYNAMIC */
21036f80bd98SFrederic Weisbecker
2104613afbf8SFrederic Weisbecker #define cond_resched() ({ \
2105874f670eSThomas Gleixner __might_resched(__FILE__, __LINE__, 0); \
2106613afbf8SFrederic Weisbecker _cond_resched(); \
2107613afbf8SFrederic Weisbecker })
21086f80bd98SFrederic Weisbecker
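/*
 * Usage sketch (illustrative only; struct example_item, its list linkage and
 * example_process_one() are hypothetical): long-running loops typically call
 * cond_resched() once per iteration so that, on non-preemptible kernels,
 * other runnable tasks still get CPU time:
 *
 *	struct example_item *it;
 *
 *	list_for_each_entry(it, &example_list, node) {
 *		example_process_one(it);
 *		cond_resched();
 *	}
 */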
2109613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock);
2110f3d4b4b1SBen Gardon extern int __cond_resched_rwlock_read(rwlock_t *lock);
2111f3d4b4b1SBen Gardon extern int __cond_resched_rwlock_write(rwlock_t *lock);
2112613afbf8SFrederic Weisbecker
211350e081b9SThomas Gleixner #define MIGHT_RESCHED_RCU_SHIFT 8
211450e081b9SThomas Gleixner #define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
211550e081b9SThomas Gleixner
21163e9cc688SThomas Gleixner #ifndef CONFIG_PREEMPT_RT
21173e9cc688SThomas Gleixner /*
21183e9cc688SThomas Gleixner * Non-RT kernels have an elevated preempt count due to the held lock,
21193e9cc688SThomas Gleixner * but are not allowed to be inside an RCU read side critical section
21203e9cc688SThomas Gleixner */
21213e9cc688SThomas Gleixner # define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET
21223e9cc688SThomas Gleixner #else
21233e9cc688SThomas Gleixner /*
21243e9cc688SThomas Gleixner * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
21253e9cc688SThomas Gleixner * cond_resched*lock() has to take that into account because it checks for
21263e9cc688SThomas Gleixner * preempt_count() and rcu_preempt_depth().
21273e9cc688SThomas Gleixner */
21283e9cc688SThomas Gleixner # define PREEMPT_LOCK_RESCHED_OFFSETS \
21293e9cc688SThomas Gleixner (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
21303e9cc688SThomas Gleixner #endif
21313e9cc688SThomas Gleixner
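/*
 * Decoding sketch (illustrative only): __might_resched() splits an offsets
 * value such as PREEMPT_LOCK_RESCHED_OFFSETS back into its two components,
 * roughly:
 *
 *	expected_preempt_count = offsets & MIGHT_RESCHED_PREEMPT_MASK;
 *	expected_rcu_depth     = offsets >> MIGHT_RESCHED_RCU_SHIFT;
 *
 * so the PREEMPT_RT variant above adds (1U << MIGHT_RESCHED_RCU_SHIFT) to
 * express that one RCU read side critical section is expected on top of the
 * lock's preempt count contribution.
 */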
2132613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({ \
21333e9cc688SThomas Gleixner __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
2134613afbf8SFrederic Weisbecker __cond_resched_lock(lock); \
2135613afbf8SFrederic Weisbecker })
2136613afbf8SFrederic Weisbecker
2137f3d4b4b1SBen Gardon #define cond_resched_rwlock_read(lock) ({ \
21383e9cc688SThomas Gleixner __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
2139f3d4b4b1SBen Gardon __cond_resched_rwlock_read(lock); \
2140f3d4b4b1SBen Gardon })
2141f3d4b4b1SBen Gardon
2142f3d4b4b1SBen Gardon #define cond_resched_rwlock_write(lock) ({ \
21433e9cc688SThomas Gleixner __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
2144f3d4b4b1SBen Gardon __cond_resched_rwlock_write(lock); \
2145f3d4b4b1SBen Gardon })
2146f3d4b4b1SBen Gardon
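/*
 * Usage sketch (illustrative only; example_lock, example_list, example_step()
 * and the list linkage are hypothetical): cond_resched_lock() drops and
 * retakes the lock when a reschedule is due, so walks that cannot tolerate
 * the lock being released typically restart from scratch:
 *
 *	struct example_item *it;
 *
 *	spin_lock(&example_lock);
 * restart:
 *	list_for_each_entry(it, &example_list, node) {
 *		example_step(it);
 *		if (cond_resched_lock(&example_lock))
 *			goto restart;
 *	}
 *	spin_unlock(&example_lock);
 *
 * A nonzero return from cond_resched_lock() means the lock was dropped and
 * re-acquired, so the list may have changed underneath the walker.
 */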
214775f93fedSPeter Zijlstra static __always_inline bool need_resched(void)
214875f93fedSPeter Zijlstra {
214975f93fedSPeter Zijlstra return unlikely(tif_need_resched());
215075f93fedSPeter Zijlstra }
215175f93fedSPeter Zijlstra
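/*
 * Usage sketch (illustrative only): busy-waiting code typically bounds its
 * spinning on need_resched(), in the spirit of the polling idle loop:
 *
 *	while (!need_resched())
 *		cpu_relax();
 */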
2152ee761f62SThomas Gleixner /*
21531da177e4SLinus Torvalds * Wrappers for p->thread_info->cpu access. No-op on UP.
21541da177e4SLinus Torvalds */
21551da177e4SLinus Torvalds #ifdef CONFIG_SMP
21561da177e4SLinus Torvalds
21571da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
21581da177e4SLinus Torvalds {
2159c546951dSAndrea Parri return READ_ONCE(task_thread_info(p)->cpu);
21601da177e4SLinus Torvalds }
21611da177e4SLinus Torvalds
2162c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
21631da177e4SLinus Torvalds
21641da177e4SLinus Torvalds #else
21651da177e4SLinus Torvalds
21661da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
21671da177e4SLinus Torvalds {
21681da177e4SLinus Torvalds return 0;
21691da177e4SLinus Torvalds }
21701da177e4SLinus Torvalds
21711da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
21721da177e4SLinus Torvalds {
21731da177e4SLinus Torvalds }
21741da177e4SLinus Torvalds
21751da177e4SLinus Torvalds #endif /* CONFIG_SMP */
21761da177e4SLinus Torvalds
2177cd9626e9SPeter Zijlstra static inline bool task_is_runnable(struct task_struct *p)
2178cd9626e9SPeter Zijlstra {
2179cd9626e9SPeter Zijlstra return p->on_rq && !p->se.sched_delayed;
2180cd9626e9SPeter Zijlstra }
2181cd9626e9SPeter Zijlstra
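/*
 * Usage sketch (illustrative only): with delayed dequeue a task can still be
 * p->on_rq while it is not eligible to run (p->se.sched_delayed set), so
 * callers that mean "would this task run if picked" should prefer
 * task_is_runnable() over checking p->on_rq directly:
 *
 *	if (task_is_runnable(p))
 *		example_account_runnable(p);	// hypothetical accounting hook
 */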
2182a1dfb631SMarcelo Tosatti extern bool sched_task_on_rq(struct task_struct *p);
218342a20f86SKees Cook extern unsigned long get_wchan(struct task_struct *p);
2184e386b672SPaul E. McKenney extern struct task_struct *cpu_curr_snapshot(int cpu);
2185a1dfb631SMarcelo Tosatti
218672375a88SKent Overstreet #include <linux/spinlock.h>
218772375a88SKent Overstreet
2188d9345c65SPan Xinhui /*
2189d9345c65SPan Xinhui * In order to reduce various lock holder preemption latencies, provide an
2190d9345c65SPan Xinhui * interface to see if a vCPU is currently running or not.
2191d9345c65SPan Xinhui *
2192d9345c65SPan Xinhui * This allows us to terminate optimistic spin loops and block, analogous to
2193d9345c65SPan Xinhui * the native optimistic spin heuristic of testing if the lock owner task is
2194d9345c65SPan Xinhui * running or not.
2195d9345c65SPan Xinhui */
2196d9345c65SPan Xinhui #ifndef vcpu_is_preempted
219742fd8baaSQian Cai static inline bool vcpu_is_preempted(int cpu)
219842fd8baaSQian Cai {
219942fd8baaSQian Cai return false;
220042fd8baaSQian Cai }
2201d9345c65SPan Xinhui #endif
2202d9345c65SPan Xinhui
220396f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
220496f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
22055c45bf27SSiddha, Suresh B
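/*
 * Usage sketch (illustrative only): in-kernel callers pass a cpumask they
 * own; pid 0 means the calling task. For example, restricting the current
 * task to CPU 0:
 *
 *	cpumask_var_t mask;
 *	long ret;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_set_cpu(0, mask);
 *	ret = sched_setaffinity(0, mask);
 *	free_cpumask_var(mask);
 */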
220682455257SDave Hansen #ifndef TASK_SIZE_OF
220782455257SDave Hansen #define TASK_SIZE_OF(tsk) TASK_SIZE
220882455257SDave Hansen #endif
220982455257SDave Hansen
2210a5418be9SViresh Kumar #ifdef CONFIG_SMP
2211c0bed69dSKefeng Wang static inline bool owner_on_cpu(struct task_struct *owner)
2212c0bed69dSKefeng Wang {
2213c0bed69dSKefeng Wang /*
2214c0bed69dSKefeng Wang * Due to the lock holder preemption issue, we skip spinning if the
2215c0bed69dSKefeng Wang * task is not on a CPU or its CPU is preempted.
2216c0bed69dSKefeng Wang */
22174cf75fd4SMarco Elver return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
2218c0bed69dSKefeng Wang }
2219c0bed69dSKefeng Wang
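/*
 * Usage sketch (illustrative only; example_lock->owner is a hypothetical
 * field): lock implementations use owner_on_cpu() to decide whether
 * optimistic spinning is still worthwhile, in the spirit of the mutex and
 * rwsem owner-spinning code:
 *
 *	while (READ_ONCE(example_lock->owner) == owner) {
 *		if (!owner_on_cpu(owner) || need_resched())
 *			return false;	// stop spinning and block instead
 *		cpu_relax();
 *	}
 *	return true;
 */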
2220a5418be9SViresh Kumar /* Returns effective CPU energy utilization, as seen by the scheduler */
2221bb447999SDietmar Eggemann unsigned long sched_cpu_util(int cpu);
2222a5418be9SViresh Kumar #endif /* CONFIG_SMP */
2223a5418be9SViresh Kumar
22246e33cad0SPeter Zijlstra #ifdef CONFIG_SCHED_CORE
22256e33cad0SPeter Zijlstra extern void sched_core_free(struct task_struct *tsk);
222685dd3f61SPeter Zijlstra extern void sched_core_fork(struct task_struct *p);
22277ac592aaSChris Hyser extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
22287ac592aaSChris Hyser unsigned long uaddr);
2229548796e2SCruz Zhao extern int sched_core_idle_cpu(int cpu);
22306e33cad0SPeter Zijlstra #else
22316e33cad0SPeter Zijlstra static inline void sched_core_free(struct task_struct *tsk) { }
223285dd3f61SPeter Zijlstra static inline void sched_core_fork(struct task_struct *p) { }
2233548796e2SCruz Zhao static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
22346e33cad0SPeter Zijlstra #endif
22356e33cad0SPeter Zijlstra
2236d664e399SThomas Gleixner extern void sched_set_stop_task(int cpu, struct task_struct *stop);
2237d664e399SThomas Gleixner
223822d407b1SSuren Baghdasaryan #ifdef CONFIG_MEM_ALLOC_PROFILING
22395a5aa3c3SSuren Baghdasaryan static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
224022d407b1SSuren Baghdasaryan {
224122d407b1SSuren Baghdasaryan swap(current->alloc_tag, tag);
224222d407b1SSuren Baghdasaryan return tag;
224322d407b1SSuren Baghdasaryan }
224422d407b1SSuren Baghdasaryan
22455a5aa3c3SSuren Baghdasaryan static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
224622d407b1SSuren Baghdasaryan {
224722d407b1SSuren Baghdasaryan #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
224822d407b1SSuren Baghdasaryan WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n");
224922d407b1SSuren Baghdasaryan #endif
225022d407b1SSuren Baghdasaryan current->alloc_tag = old;
225122d407b1SSuren Baghdasaryan }
225222d407b1SSuren Baghdasaryan #else
225322d407b1SSuren Baghdasaryan #define alloc_tag_save(_tag) NULL
225422d407b1SSuren Baghdasaryan #define alloc_tag_restore(_tag, _old) do {} while (0)
225522d407b1SSuren Baghdasaryan #endif
225622d407b1SSuren Baghdasaryan
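/*
 * Usage sketch (illustrative only; tag, size and ptr are assumed to be
 * provided by the caller): code allocating on behalf of another context can
 * redirect memory allocation profiling by bracketing the allocation:
 *
 *	struct alloc_tag *old = alloc_tag_save(tag);
 *
 *	ptr = kmalloc(size, GFP_KERNEL);
 *	alloc_tag_restore(tag, old);
 *
 * With CONFIG_MEM_ALLOC_PROFILING disabled both helpers degenerate to no-ops.
 */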
22571da177e4SLinus Torvalds #endif
2258