/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Using the avg_vruntime, preserve lag across sleep+wake cycles
 * (EEVDF placement strategy #1; strategy #2 is used when this is disabled).
 */
SCHED_FEAT(PLACE_LAG, true)
/*
 * Give new tasks half a slice to ease into the competition.
 */
SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
/*
 * Preserve relative virtual deadline on 'migration'.
 */
SCHED_FEAT(PLACE_REL_DEADLINE, true)
/*
 * Inhibit (wakeup) preemption until the current task has either matched the
 * 0-lag point or has exhausted its slice.
 */
SCHED_FEAT(RUN_TO_PARITY, true)
/*
 * Allow the wakeup of a task with a shorter slice to cancel RUN_TO_PARITY
 * for the current task.
 */
SCHED_FEAT(PREEMPT_SHORT, true)

/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it's likely going to consume data we
 * touched; this increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, false)

/*
 * Consider buddies to be cache hot; this decreases the likelihood of a
 * cache buddy being migrated away and increases cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, true)

/*
 * Delay dequeueing tasks until they get selected or woken.
 *
 * By delaying the dequeue for non-eligible tasks, they remain in the
 * competition and can burn off their negative lag. When they get selected
 * they'll have positive lag by definition.
 *
 * DELAY_ZERO clips the lag on dequeue (or wakeup) to 0.
 */
SCHED_FEAT(DELAY_DEQUEUE, true)
SCHED_FEAT(DELAY_ZERO, true)

/*
 * Allow wakeup-time preemption of the current task.
 */
SCHED_FEAT(WAKEUP_PREEMPTION, true)

SCHED_FEAT(HRTICK, false)
SCHED_FEAT(HRTICK_DL, false)
SCHED_FEAT(DOUBLE_TICK, false)

/*
 * Decrement CPU capacity based on time not spent running tasks.
 */
SCHED_FEAT(NONTASK_CAPACITY, true)

#ifdef CONFIG_PREEMPT_RT
SCHED_FEAT(TTWU_QUEUE, false)
#else

/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, true)
#endif

/*
 * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
 */
SCHED_FEAT(SIS_UTIL, true)

/*
 * Issue a WARN when we do multiple update_rq_clock() calls
 * in a single rq->lock section. Disabled by default because the
 * annotations are not complete.
 */
SCHED_FEAT(WARN_DOUBLE_CLOCK, false)

#ifdef HAVE_RT_PUSH_IPI
/*
 * When many CPUs lower their priorities at the same time while a single
 * CPU has a migratable RT task waiting to run, they would all try to take
 * that CPU's rq lock, creating heavy contention (a thundering herd).
 * Instead, send an IPI to that CPU and let it push the RT task to where
 * it should go.
 */
SCHED_FEAT(RT_PUSH_IPI, true)
#endif

SCHED_FEAT(RT_RUNTIME_SHARE, false)
SCHED_FEAT(LB_MIN, false)
SCHED_FEAT(ATTACH_AGE_LOAD, true)

SCHED_FEAT(WA_IDLE, true)
SCHED_FEAT(WA_WEIGHT, true)
SCHED_FEAT(WA_BIAS, true)

/*
 * Utilization estimation: use estimated CPU utilization.
 */
SCHED_FEAT(UTIL_EST, true)

SCHED_FEAT(LATENCY_WARN, false)
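
/*
 * Background for the lag-based features above (PLACE_LAG, RUN_TO_PARITY,
 * DELAY_DEQUEUE): following the EEVDF comments in kernel/sched/fair.c, a
 * task's lag is (roughly) the service it is owed:
 *
 *	lag_i = w_i * (V - v_i)
 *
 * where V is the weighted average vruntime of the queue (avg_vruntime),
 * v_i the task's vruntime, and w_i its weight. The "0-lag point" is the
 * moment this difference crosses zero.
 */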
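
/*
 * How these entries are consumed (a rough sketch; the authoritative
 * expansion lives in kernel/sched/sched.h): this header is included more
 * than once with different SCHED_FEAT() definitions, e.g. to build the
 * feature-bit enum:
 *
 *	#define SCHED_FEAT(name, enabled)	__SCHED_FEAT_##name ,
 *	enum {
 *	#include "features.h"
 *		__SCHED_FEAT_NR,
 *	};
 *	#undef SCHED_FEAT
 *
 * Scheduler code then tests a flag via sched_feat(), e.g.
 * sched_feat(PLACE_LAG), which resolves to a static branch or a bitmask
 * test depending on the configuration.
 */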
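
/*
 * Runtime toggling: with scheduler debug support, each feature can be
 * flipped through debugfs; writing the bare name sets a flag and the NO_
 * prefix clears it. For example (path as found on recent kernels):
 *
 *	cat /sys/kernel/debug/sched/features
 *	echo NO_NEXT_BUDDY > /sys/kernel/debug/sched/features
 *	echo NEXT_BUDDY > /sys/kernel/debug/sched/features
 */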