/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Using the avg_vruntime, do the right thing and preserve lag across
 * sleep+wake cycles. EEVDF placement strategy #1, #2 if disabled.
 */
SCHED_FEAT(PLACE_LAG, true)
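#if 0
/*
 * Illustrative sketch of placement strategy #1 vs #2, not the actual
 * place_entity() code (which additionally scales the lag by the queue
 * load before applying it):
 */
static inline u64 place_vruntime_sketch(struct cfs_rq *cfs_rq,
					struct sched_entity *se)
{
	s64 lag = sched_feat(PLACE_LAG) ? se->vlag : 0;	/* #2: forget the lag */

	return avg_vruntime(cfs_rq) - lag;		/* #1: preserve the lag */
}
#endif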
/*
 * Give new tasks half a slice to ease into the competition.
 */
SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
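#if 0
/*
 * Illustrative sketch: on the very first enqueue the virtual slice is
 * halved before the first deadline is set (simplified fragment in the
 * style of place_entity()):
 */
	u64 vslice = calc_delta_fair(se->slice, se);

	if (sched_feat(PLACE_DEADLINE_INITIAL) && (flags & ENQUEUE_INITIAL))
		vslice /= 2;

	se->deadline = se->vruntime + vslice;
#endif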
/*
 * Preserve relative virtual deadline on 'migration'.
 */
SCHED_FEAT(PLACE_REL_DEADLINE, true)
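#if 0
/*
 * Illustrative sketch: remember the remaining virtual deadline on
 * dequeue and re-apply it relative to the new vruntime on the next
 * placement (simplified; surrounding bookkeeping omitted):
 */
	/* dequeue side */
	se->rel_deadline = se->deadline - se->vruntime;

	/* placement side */
	if (sched_feat(PLACE_REL_DEADLINE) && se->rel_deadline) {
		se->deadline = se->vruntime + se->rel_deadline;
		se->rel_deadline = 0;
	}
#endif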
/*
 * Inhibit (wakeup) preemption until the current task has either matched the
 * 0-lag point or until it has exhausted its slice.
 */
SCHED_FEAT(RUN_TO_PARITY, true)
/*
 * Allow wakeup of tasks with a shorter slice to cancel RUN_TO_PARITY for
 * current.
 */
SCHED_FEAT(PREEMPT_SHORT, true)
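#if 0
/*
 * Illustrative sketch of how RUN_TO_PARITY and PREEMPT_SHORT combine;
 * hypothetical helper names, not the actual wakeup-preemption code
 * (protected_by_slice() stands in for current's slice protection):
 */
static bool wakeup_preempt_sketch(struct cfs_rq *cfs_rq,
				  struct sched_entity *curr,
				  struct sched_entity *wakee)
{
	/* PREEMPT_SHORT: an eligible wakee with a shorter slice preempts */
	if (sched_feat(PREEMPT_SHORT) && wakee->slice < curr->slice &&
	    entity_eligible(cfs_rq, wakee))
		return true;

	/* RUN_TO_PARITY: otherwise current keeps the CPU within its slice */
	if (sched_feat(RUN_TO_PARITY) && protected_by_slice(curr))
		return false;

	return pick_eevdf(cfs_rq) == wakee;	/* plain EEVDF decision */
}
#endif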

/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it is likely to consume data we just
 * touched; this increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, false)
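#if 0
/*
 * Illustrative sketch: at pick time the last-woken buddy is preferred,
 * provided it is still eligible (simplified fragment in the style of
 * pick_next_entity()):
 */
	if (sched_feat(NEXT_BUDDY) && cfs_rq->next &&
	    entity_eligible(cfs_rq, cfs_rq->next))
		return cfs_rq->next;
#endif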

/*
 * Consider buddies to be cache hot; this decreases the likelihood of a
 * cache buddy being migrated away and increases cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, true)
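#if 0
/*
 * Illustrative sketch: during load balance, task_hot() reports a buddy
 * as cache hot so the migration cost check keeps it in place
 * (simplified fragment in the style of task_hot()):
 */
	if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
	    (&p->se == cfs_rq_of(&p->se)->next))
		return 1;
#endif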

/*
 * Delay dequeueing tasks until they get selected or woken.
 *
 * By delaying the dequeue for non-eligible tasks, they remain in the
 * competition and can burn off their negative lag. When they get selected
 * they'll have positive lag by definition.
 *
 * DELAY_ZERO clips the lag on dequeue (or wakeup) to 0.
 */
SCHED_FEAT(DELAY_DEQUEUE, true)
SCHED_FEAT(DELAY_ZERO, true)
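#if 0
/*
 * Illustrative sketch (simplified; sched_delayed is the real flag, the
 * surrounding bookkeeping is omitted):
 */
	/* the dequeue of a non-eligible task is deferred ... */
	if (sched_feat(DELAY_DEQUEUE) && !entity_eligible(cfs_rq, se)) {
		se->sched_delayed = 1;	/* stay enqueued, burn off the lag */
		return false;
	}

	/* ... and once it really dequeues (or wakes), the lag is clipped */
	if (sched_feat(DELAY_ZERO))
		se->vlag = 0;
#endif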

/*
 * Allow wakeup-time preemption of the current task:
 */
SCHED_FEAT(WAKEUP_PREEMPTION, true)

SCHED_FEAT(HRTICK, false)
SCHED_FEAT(HRTICK_DL, false)

/*
 * Decrement CPU capacity based on time not spent running tasks
 */
SCHED_FEAT(NONTASK_CAPACITY, true)
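#if 0
/*
 * Illustrative sketch: the capacity left for fair tasks is the CPU's
 * full capacity minus the time eaten by IRQ, RT and DL pressure
 * (simplified; the real scale_rt_capacity() scales, rather than
 * subtracts, the IRQ contribution):
 */
	unsigned long capacity = arch_scale_cpu_capacity(cpu);

	capacity -= cpu_util_irq(rq);
	capacity -= cpu_util_rt(rq);
	capacity -= cpu_util_dl(rq);
#endif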

#ifdef CONFIG_PREEMPT_RT
SCHED_FEAT(TTWU_QUEUE, false)
#else

/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, true)
#endif
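#if 0
/*
 * Illustrative sketch of the remote wakeup path (simplified; see
 * ttwu_queue_wakelist() for the real conditions): instead of taking
 * the remote rq->lock, queue @p on the target CPU and kick it with
 * the scheduler IPI; the target finishes the wakeup under its own lock.
 */
	if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
		__ttwu_queue_wakelist(p, cpu, wake_flags);
		return true;	/* the wakeup completes on the target CPU */
	}
#endif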

/*
 * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
 */
SCHED_FEAT(SIS_UTIL, true)
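#if 0
/*
 * Illustrative sketch: the idle-CPU scan budget shrinks as the LLC gets
 * busier; it is sized in update_idle_cpu_scan() and consumed in
 * select_idle_cpu() (simplified):
 */
	if (sched_feat(SIS_UTIL)) {
		nr = READ_ONCE(sd_share->nr_idle_scan);
		if (!nr)
			return -1;	/* LLC overloaded: skip the scan */
	}
#endif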

/*
 * Issue a WARN when we do multiple update_rq_clock() calls
 * in a single rq->lock section. Default disabled because the
 * annotations are not complete.
 */
SCHED_FEAT(WARN_DOUBLE_CLOCK, false)

#ifdef HAVE_RT_PUSH_IPI
/*
 * When many CPUs lower their priorities at the same time while a single
 * CPU holds a migratable RT task waiting to run, they would all try to
 * take that CPU's rq lock and could create heavy contention (a
 * thundering herd). Instead, send an IPI to that CPU and let it push
 * the RT task to where it should go.
 */
SCHED_FEAT(RT_PUSH_IPI, true)
#endif
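#if 0
/*
 * Illustrative sketch (simplified from the pull path): rather than
 * pulling and contending on the source CPU's rq lock, ask the source
 * CPU to push under its own lock:
 */
	if (sched_feat(RT_PUSH_IPI)) {
		tell_cpu_to_push(this_rq);	/* IPI the source; it pushes */
		return;
	}
	/* otherwise pull directly, contending on the source rq lock */
#endif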

SCHED_FEAT(RT_RUNTIME_SHARE, false)
SCHED_FEAT(LB_MIN, false)
SCHED_FEAT(ATTACH_AGE_LOAD, true)

SCHED_FEAT(WA_IDLE, true)
SCHED_FEAT(WA_WEIGHT, true)
SCHED_FEAT(WA_BIAS, true)

/*
 * UtilEstimation. Use estimated CPU utilization.
 */
SCHED_FEAT(UTIL_EST, true)
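#if 0
/*
 * Illustrative sketch: the effective utilization is the max of the
 * decaying PELT average and the estimate snapshotted at last dequeue
 * (simplified from cpu_util()):
 */
	unsigned long util = READ_ONCE(cfs_rq->avg.util_avg);

	if (sched_feat(UTIL_EST))
		util = max(util, (unsigned long)READ_ONCE(cfs_rq->avg.util_est));
#endif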

SCHED_FEAT(LATENCY_WARN, false)