#ifdef CONFIG_SMP
#include "sched-pelt.h"

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

#ifdef CONFIG_SCHED_THERMAL_PRESSURE
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);

static inline u64 thermal_load_avg(struct rq *rq)
{
	return READ_ONCE(rq->avg_thermal.load_avg);
}
#else
static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif

#define PELT_MIN_DIVIDER	(LOAD_AVG_MAX - 1024)

static inline u32 get_pelt_divider(struct sched_avg *avg)
{
	return PELT_MIN_DIVIDER + avg->period_contrib;
}

static inline void cfs_se_util_change(struct sched_avg *avg)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

	/* Avoid store if the flag has already been reset */
	enqueued = avg->util_est.enqueued;
	if (!(enqueued & UTIL_AVG_UNCHANGED))
		return;

	/* Reset flag to report that util_avg has been updated */
	enqueued &= ~UTIL_AVG_UNCHANGED;
	WRITE_ONCE(avg->util_est.enqueued, enqueued);
}

/*
 * The clock_pelt scales the time to reflect the effective amount of
 * computation done during the running delta time but then syncs back to
 * clock_task when the rq is idle.
 *
 *
 * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
 * @ max capacity  ------******---------------******---------------
 * @ half capacity ------************---------************---------
 * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
 *
 */
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
	if (unlikely(is_idle_task(rq->curr))) {
		/* The rq is idle, we can sync to clock_task */
		rq->clock_pelt = rq_clock_task(rq);
		return;
	}

	/*
	 * When a rq runs at a lower compute capacity, it needs more
	 * time to do the same amount of work than it would at max
	 * capacity. In order to be invariant, we scale the delta to
	 * reflect how much work has really been done.
	 * Running longer results in stealing idle time that will
	 * disturb the load signal compared to max capacity. This
	 * stolen idle time will be automatically reflected when the
	 * rq becomes idle and the clock is synced with
	 * rq_clock_task.
	 */

	/*
	 * Scale the elapsed time to reflect the real amount of
	 * computation
	 */
	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

	rq->clock_pelt += delta;
}
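/*
 * Worked example (illustrative only; the numbers are assumptions, not
 * taken from this file): assuming cap_scale() is the usual
 * ((v) * (cap) >> SCHED_CAPACITY_SHIFT) helper with
 * SCHED_CAPACITY_SCALE == 1024, a CPU whose compute capacity is 512
 * (half of max) running at a frequency capacity of 768 (75% of max)
 * turns a 16ms wall-clock delta into:
 *
 *	delta = (16000000 * 512) >> 10;		// -> 8000000 (CPU invariance)
 *	delta = ( 8000000 * 768) >> 10;		// -> 6000000 (freq invariance)
 *
 * i.e. clock_pelt advances by only 6ms (16ms * 0.5 * 0.75), the amount
 * of work actually completed expressed in max-capacity time, matching
 * the stretched periods in the diagram above.
 */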
/*
 * When rq becomes idle, we have to check if it has lost idle time
 * because it was fully busy. A rq is fully used when the /Sum util_sum
 * is greater than or equal to:
 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
 * For optimization and rounding purposes, we don't take into account
 * the position in the current window (period_contrib) and we use the
 * higher bound of util_sum to decide.
 */
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
	u32 util_sum = rq->cfs.avg.util_sum;

	util_sum += rq->avg_rt.util_sum;
	util_sum += rq->avg_dl.util_sum;

	/*
	 * Reflecting stolen time makes sense only if the idle
	 * phase would be present at max capacity. As soon as the
	 * utilization of a rq has reached the maximum value, it is
	 * considered an always-running rq without idle time to
	 * steal. This potential idle time is considered lost in
	 * that case. We keep track of this lost idle time relative
	 * to rq's clock_task.
	 */
	if (util_sum >= divider)
		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_pelt - rq->lost_idle_time;
}

#ifdef CONFIG_CFS_BANDWIDTH
/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	if (unlikely(cfs_rq->throttle_count))
		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
}
#else
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	return rq_clock_pelt(rq_of(cfs_rq));
}
#endif

#else /* !CONFIG_SMP */

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	return rq_clock_task(rq);
}

static inline void
update_rq_clock_pelt(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

#endif /* CONFIG_SMP */
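/*
 * Usage sketch (hypothetical caller, for illustration only; the real
 * call sites live in kernel/sched/fair.c): PELT updates are driven
 * with cfs_rq_clock_pelt() as 'now', so throttled time and lost idle
 * time are already factored out of the signal before decay is applied:
 *
 *	static void example_pelt_update(struct cfs_rq *cfs_rq,
 *					struct sched_entity *se)
 *	{
 *		u64 now = cfs_rq_clock_pelt(cfs_rq);
 *
 *		__update_load_avg_se(now, cfs_rq, se);
 *		__update_load_avg_cfs_rq(now, cfs_rq);
 *	}
 *
 * example_pelt_update() is not a kernel function; it only shows the
 * intended ordering: derive 'now' once, update the se, then the cfs_rq,
 * so both averages decay against the same invariant clock.
 */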