/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Using the avg_vruntime, do the right thing and preserve lag across
 * sleep+wake cycles. This is EEVDF placement strategy #1; strategy #2 is
 * used when this feature is disabled.
 */
SCHED_FEAT(PLACE_LAG, true)
/*
 * Give new tasks half a slice to ease into the competition.
 */
SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
/*
 * Preserve relative virtual deadline on 'migration'.
 */
SCHED_FEAT(PLACE_REL_DEADLINE, true)
/*
 * Inhibit (wakeup) preemption until the current task has either matched the
 * 0-lag point or has exhausted its slice.
 */
SCHED_FEAT(RUN_TO_PARITY, true)
/*
 * Allow wakeup of tasks with a shorter slice to cancel RUN_TO_PARITY for
 * current.
 */
SCHED_FEAT(PREEMPT_SHORT, true)

/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it's likely going to consume data we
 * touched; this increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, false)

/*
 * Allow completely ignoring cfs_rq->next, which can be set from various
 * places:
 * - NEXT_BUDDY (wakeup preemption)
 * - yield_to_task()
 * - cgroup dequeue / pick
 */
SCHED_FEAT(PICK_BUDDY, true)

/*
 * Consider buddies to be cache hot; this decreases the likelihood of a
 * cache buddy being migrated away and increases cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, true)

/*
 * Delay dequeueing tasks until they get selected or woken.
 *
 * By delaying the dequeue for non-eligible tasks, they remain in the
 * competition and can burn off their negative lag. When they get selected
 * they'll have positive lag by definition.
 *
 * DELAY_ZERO clips the lag on dequeue (or wakeup) to 0.
 */
SCHED_FEAT(DELAY_DEQUEUE, true)
SCHED_FEAT(DELAY_ZERO, true)

/*
 * Allow wakeup-time preemption of the current task:
 */
SCHED_FEAT(WAKEUP_PREEMPTION, true)

SCHED_FEAT(HRTICK, false)
SCHED_FEAT(HRTICK_DL, false)

/*
 * Decrement CPU capacity based on time not spent running tasks.
 */
SCHED_FEAT(NONTASK_CAPACITY, true)

#ifdef CONFIG_PREEMPT_RT
SCHED_FEAT(TTWU_QUEUE, false)
#else

/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, true)
#endif

/*
 * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
 */
SCHED_FEAT(SIS_UTIL, true)

/*
 * Issue a WARN when we do multiple update_rq_clock() calls
 * in a single rq->lock section. Disabled by default because the
 * annotations are not complete.
 */
SCHED_FEAT(WARN_DOUBLE_CLOCK, false)

#ifdef HAVE_RT_PUSH_IPI
/*
 * When many CPUs lower their priorities at the same time while a single
 * CPU has a migratable RT task waiting to run, they would all try to take
 * that CPU's rq lock, possibly creating large contention. To avoid this
 * thundering herd, send an IPI to that CPU instead and let it push the RT
 * task to where it should go.
 */
SCHED_FEAT(RT_PUSH_IPI, true)
#endif

SCHED_FEAT(RT_RUNTIME_SHARE, false)
SCHED_FEAT(LB_MIN, false)
SCHED_FEAT(ATTACH_AGE_LOAD, true)

SCHED_FEAT(WA_IDLE, true)
SCHED_FEAT(WA_WEIGHT, true)
SCHED_FEAT(WA_BIAS, true)
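
/*
 * Example (illustration only): features declared here are consulted in
 * scheduler code through the sched_feat() macro, which expands to a
 * static-key test (or a compile-time constant), so disabled features cost
 * little. A minimal sketch of the pattern; place_example() and apply_lag()
 * are hypothetical stand-ins, not real kernel functions:
 *
 *	static void place_example(struct cfs_rq *cfs_rq,
 *				  struct sched_entity *se)
 *	{
 *		if (sched_feat(PLACE_LAG))
 *			apply_lag(cfs_rq, se);
 *	}
 */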

/*
 * UtilEstimation. Use estimated CPU utilization.
 */
SCHED_FEAT(UTIL_EST, true)

SCHED_FEAT(LATENCY_WARN, false)
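
/*
 * Usage sketch (illustration only): on kernels built with
 * CONFIG_SCHED_DEBUG, each feature can be flipped at run time through
 * debugfs; writing NO_<NAME> disables a feature and writing <NAME>
 * re-enables it:
 *
 *	cat /sys/kernel/debug/sched/features
 *	echo NO_PLACE_LAG > /sys/kernel/debug/sched/features
 *	echo PLACE_LAG > /sys/kernel/debug/sched/features
 */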