xref: /linux/kernel/sched/features.h (revision 2586af1ac187f6b3a50930a4e33497074e81762d)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2391e43daSPeter Zijlstra /*
3391e43daSPeter Zijlstra  * Only give sleepers 50% of their service deficit. This allows
4391e43daSPeter Zijlstra  * them to run sooner, but does not allow tons of sleepers to
5391e43daSPeter Zijlstra  * rip the spread apart.
6391e43daSPeter Zijlstra  */
7f8b6d1ccSPeter Zijlstra SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
8391e43daSPeter Zijlstra 
9391e43daSPeter Zijlstra /*
10391e43daSPeter Zijlstra  * Place new tasks ahead so that they do not starve already running
11391e43daSPeter Zijlstra  * tasks
12391e43daSPeter Zijlstra  */
13f8b6d1ccSPeter Zijlstra SCHED_FEAT(START_DEBIT, true)
14391e43daSPeter Zijlstra 
15391e43daSPeter Zijlstra /*
16391e43daSPeter Zijlstra  * Prefer to schedule the task we woke last (assuming it failed
17391e43daSPeter Zijlstra  * wakeup-preemption), since it's likely going to consume data we
18391e43daSPeter Zijlstra  * touched, increases cache locality.
19391e43daSPeter Zijlstra  */
20f8b6d1ccSPeter Zijlstra SCHED_FEAT(NEXT_BUDDY, false)
21391e43daSPeter Zijlstra 
22391e43daSPeter Zijlstra /*
23391e43daSPeter Zijlstra  * Prefer to schedule the task that ran last (when we did
24391e43daSPeter Zijlstra  * wake-preempt) as that likely will touch the same data, increases
25391e43daSPeter Zijlstra  * cache locality.
26391e43daSPeter Zijlstra  */
27f8b6d1ccSPeter Zijlstra SCHED_FEAT(LAST_BUDDY, true)
28391e43daSPeter Zijlstra 
29391e43daSPeter Zijlstra /*
30391e43daSPeter Zijlstra  * Consider buddies to be cache hot, decreases the likelihood of a
31391e43daSPeter Zijlstra  * cache buddy being migrated away, increases cache locality.
32391e43daSPeter Zijlstra  */
33f8b6d1ccSPeter Zijlstra SCHED_FEAT(CACHE_HOT_BUDDY, true)
34391e43daSPeter Zijlstra 
35391e43daSPeter Zijlstra /*
368ed92e51SIngo Molnar  * Allow wakeup-time preemption of the current task:
378ed92e51SIngo Molnar  */
388ed92e51SIngo Molnar SCHED_FEAT(WAKEUP_PREEMPTION, true)
398ed92e51SIngo Molnar 
/*
 * Drive scheduler ticks from hrtimers for finer-grained preemption
 * instead of relying solely on the regular (jiffy) tick; DOUBLE_TICK
 * keeps running the regular tick as well when HRTICK is active.
 * NOTE(review): semantics inferred from hrtick usage in sched/core.c —
 * confirm there. Both default off.
 */
40f8b6d1ccSPeter Zijlstra SCHED_FEAT(HRTICK, false)
41f8b6d1ccSPeter Zijlstra SCHED_FEAT(DOUBLE_TICK, false)
42391e43daSPeter Zijlstra 
43391e43daSPeter Zijlstra /*
445d4dfdddSNicolas Pitre  * Decrement CPU capacity based on time not spent running tasks
45391e43daSPeter Zijlstra  */
465d4dfdddSNicolas Pitre SCHED_FEAT(NONTASK_CAPACITY, true)
47391e43daSPeter Zijlstra 
48391e43daSPeter Zijlstra /*
49391e43daSPeter Zijlstra  * Queue remote wakeups on the target CPU and process them
50391e43daSPeter Zijlstra  * using the scheduler IPI. Reduces rq->lock contention/bounces.
51391e43daSPeter Zijlstra  */
52f8b6d1ccSPeter Zijlstra SCHED_FEAT(TTWU_QUEUE, true)
53391e43daSPeter Zijlstra 
544c77b18cSPeter Zijlstra /*
554c77b18cSPeter Zijlstra  * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
564c77b18cSPeter Zijlstra  */
574c77b18cSPeter Zijlstra SCHED_FEAT(SIS_AVG_CPU, false)
581ad3aaf3SPeter Zijlstra SCHED_FEAT(SIS_PROP, true)
594c77b18cSPeter Zijlstra 
6026ae58d2SPeter Zijlstra /*
6126ae58d2SPeter Zijlstra  * Issue a WARN when we do multiple update_rq_clock() calls
6226ae58d2SPeter Zijlstra  * in a single rq->lock section. Default disabled because the
6326ae58d2SPeter Zijlstra  * annotations are not complete.
6426ae58d2SPeter Zijlstra  */
6526ae58d2SPeter Zijlstra SCHED_FEAT(WARN_DOUBLE_CLOCK, false)
6626ae58d2SPeter Zijlstra 
67b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
68b6366f04SSteven Rostedt /*
69b6366f04SSteven Rostedt  * In order to avoid a thundering herd attack of CPUs that are
70b6366f04SSteven Rostedt  * lowering their priorities at the same time, and there being
71b6366f04SSteven Rostedt  * a single CPU that has an RT task that can migrate and is waiting
72b6366f04SSteven Rostedt  * to run, where the other CPUs will try to take that CPU's
73b6366f04SSteven Rostedt  * rq lock and possibly create a large contention, sending an
74b6366f04SSteven Rostedt  * IPI to that CPU and let that CPU push the RT task to where
75b6366f04SSteven Rostedt  * it should go may be a better scenario.
76b6366f04SSteven Rostedt  */
77b6366f04SSteven Rostedt SCHED_FEAT(RT_PUSH_IPI, true)
78b6366f04SSteven Rostedt #endif
79b6366f04SSteven Rostedt 
/*
 * Share unused RT runtime between CPUs in the same root domain.
 * NOTE(review): disabled by default — presumably because borrowing
 * runtime weakens per-CPU RT throttling guarantees; confirm in rt.c.
 */
80*2586af1aSDaniel Bristot de Oliveira SCHED_FEAT(RT_RUNTIME_SHARE, false)
/* Apply a minimum-load filter when picking tasks to migrate during
 * load balancing — NOTE(review): see load_balance() in fair.c. */
81eb95308eSPeter Zijlstra SCHED_FEAT(LB_MIN, false)
/* Age a task's load-average contribution when it is attached to /
 * detached from a cfs_rq — NOTE(review): confirm against fair.c. */
82a9280514SPeter Zijlstra SCHED_FEAT(ATTACH_AGE_LOAD, true)
83a9280514SPeter Zijlstra 
/*
 * Knobs for the wake_affine() wakeup-placement heuristics:
 * consider an idle previous CPU, compare load weights, and bias
 * toward the waking CPU. NOTE(review): exact behavior lives in
 * fair.c's wake_affine_*() helpers — verify there.
 */
84d153b153SPeter Zijlstra SCHED_FEAT(WA_IDLE, true)
85f2cdd9ccSPeter Zijlstra SCHED_FEAT(WA_WEIGHT, true)
86f2cdd9ccSPeter Zijlstra SCHED_FEAT(WA_BIAS, true)
877f65ea42SPatrick Bellasi 
887f65ea42SPatrick Bellasi /*
897f65ea42SPatrick Bellasi  * UtilEstimation. Use estimated CPU utilization for task placement
907f65ea42SPatrick Bellasi  * and frequency selection; FASTUP lets the estimate ramp up quickly.
917f65ea42SPatrick Bellasi  */
91d519329fSPatrick Bellasi SCHED_FEAT(UTIL_EST, true)
92b8c96361SPatrick Bellasi SCHED_FEAT(UTIL_EST_FASTUP, true)
93